From 690a7c2938264b7feeeffdf0947853ac1b0f3549 Mon Sep 17 00:00:00 2001
From: Mustafa Gezen
Date: Wed, 23 Aug 2023 02:58:08 +0200
Subject: [PATCH] govendor

---
 deps.bzl | 352 +- go.mod | 67 +- go.sum | 151 +- vendor.go | 2 + vendor/ariga.io/atlas/LICENSE | 202 + vendor/ariga.io/atlas/schemahcl/BUILD | 32 + vendor/ariga.io/atlas/schemahcl/context.go | 464 + vendor/ariga.io/atlas/schemahcl/extension.go | 671 + vendor/ariga.io/atlas/schemahcl/hcl.go | 685 + vendor/ariga.io/atlas/schemahcl/opts.go | 261 + vendor/ariga.io/atlas/schemahcl/spec.go | 498 + vendor/ariga.io/atlas/schemahcl/stdlib.go | 308 + vendor/ariga.io/atlas/schemahcl/types.go | 385 + .../atlas/sql/internal/specutil/BUILD | 23 + .../atlas/sql/internal/specutil/convert.go | 733 + .../atlas/sql/internal/specutil/spec.go | 136 + vendor/ariga.io/atlas/sql/internal/sqlx/BUILD | 22 + .../ariga.io/atlas/sql/internal/sqlx/dev.go | 132 + .../ariga.io/atlas/sql/internal/sqlx/diff.go | 577 + .../atlas/sql/internal/sqlx/exclude.go | 169 + .../ariga.io/atlas/sql/internal/sqlx/plan.go | 344 + .../ariga.io/atlas/sql/internal/sqlx/sqlx.go | 530 + vendor/ariga.io/atlas/sql/migrate/BUILD | 14 + vendor/ariga.io/atlas/sql/migrate/dir.go | 630 + vendor/ariga.io/atlas/sql/migrate/lex.go | 302 + vendor/ariga.io/atlas/sql/migrate/migrate.go | 975 + .../sql/migrate/testdata/migrate/atlas.sum | 3 + vendor/ariga.io/atlas/sql/mysql/BUILD | 29 + vendor/ariga.io/atlas/sql/mysql/convert.go | 264 + vendor/ariga.io/atlas/sql/mysql/diff.go | 637 + vendor/ariga.io/atlas/sql/mysql/driver.go | 373 + vendor/ariga.io/atlas/sql/mysql/inspect.go | 882 + .../sql/mysql/internal/mysqlversion/BUILD | 20 + .../mysql/internal/mysqlversion/is/.README.md | 14 + .../internal/mysqlversion/is/charset2collate | 1 + .../mysqlversion/is/charset2collate.maria | 1 + .../internal/mysqlversion/is/collate2charset | 1 + .../mysqlversion/is/collate2charset.maria | 1 + .../internal/mysqlversion/mysqlversion.go | 150 + vendor/ariga.io/atlas/sql/mysql/migrate.go | 802 + vendor/ariga.io/atlas/sql/mysql/sqlspec.go | 492 + vendor/ariga.io/atlas/sql/mysql/tidb.go | 288 + vendor/ariga.io/atlas/sql/postgres/BUILD | 29 + vendor/ariga.io/atlas/sql/postgres/convert.go | 473 + vendor/ariga.io/atlas/sql/postgres/crdb.go | 343 + vendor/ariga.io/atlas/sql/postgres/diff.go | 427 + vendor/ariga.io/atlas/sql/postgres/driver.go | 458 + vendor/ariga.io/atlas/sql/postgres/inspect.go | 1277 + .../sql/postgres/internal/postgresop/BUILD | 12 + .../internal/postgresop/postgresop.go | 194 + vendor/ariga.io/atlas/sql/postgres/migrate.go | 1343 + vendor/ariga.io/atlas/sql/postgres/sqlspec.go | 838 + vendor/ariga.io/atlas/sql/schema/BUILD | 15 + .../atlas/sql/schema/changekind_string.go | 55 + vendor/ariga.io/atlas/sql/schema/dsl.go | 774 + vendor/ariga.io/atlas/sql/schema/inspect.go | 121 + vendor/ariga.io/atlas/sql/schema/migrate.go | 457 + vendor/ariga.io/atlas/sql/schema/schema.go | 339 + vendor/ariga.io/atlas/sql/sqlclient/BUILD | 14 + vendor/ariga.io/atlas/sql/sqlclient/client.go | 404 + vendor/ariga.io/atlas/sql/sqlite/BUILD | 27 + vendor/ariga.io/atlas/sql/sqlite/convert.go | 107 + vendor/ariga.io/atlas/sql/sqlite/diff.go | 174 + vendor/ariga.io/atlas/sql/sqlite/driver.go | 324 + vendor/ariga.io/atlas/sql/sqlite/inspect.go | 727 + vendor/ariga.io/atlas/sql/sqlite/migrate.go | 581 + vendor/ariga.io/atlas/sql/sqlite/sqlspec.go | 233 + vendor/ariga.io/atlas/sql/sqlspec/BUILD | 13 + vendor/ariga.io/atlas/sql/sqlspec/sqlspec.go | 89 + vendor/ariga.io/atlas/sql/sqltool/BUILD | 15 +
vendor/ariga.io/atlas/sql/sqltool/doc.go | 6 + vendor/ariga.io/atlas/sql/sqltool/hidden.go | 13 + .../atlas/sql/sqltool/hidden_windows.go | 26 + vendor/ariga.io/atlas/sql/sqltool/tool.go | 547 + vendor/entgo.io/ent/.all-contributorsrc | 855 + vendor/entgo.io/ent/.golangci.yml | 73 + vendor/entgo.io/ent/BUILD | 18 + vendor/entgo.io/ent/CODE_OF_CONDUCT.md | 77 + vendor/entgo.io/ent/CONTRIBUTING.md | 70 + vendor/entgo.io/ent/LICENSE | 202 + vendor/entgo.io/ent/README.md | 58 + vendor/entgo.io/ent/README_jp.md | 54 + vendor/entgo.io/ent/README_kr.md | 52 + vendor/entgo.io/ent/README_zh.md | 44 + vendor/entgo.io/ent/dialect/BUILD | 10 + vendor/entgo.io/ent/dialect/dialect.go | 208 + vendor/entgo.io/ent/dialect/entsql/BUILD | 10 + .../entgo.io/ent/dialect/entsql/annotation.go | 685 + vendor/entgo.io/ent/dialect/sql/BUILD | 15 + vendor/entgo.io/ent/dialect/sql/builder.go | 3996 + vendor/entgo.io/ent/dialect/sql/driver.go | 184 + vendor/entgo.io/ent/dialect/sql/scan.go | 420 + vendor/entgo.io/ent/dialect/sql/schema/BUILD | 31 + .../entgo.io/ent/dialect/sql/schema/atlas.go | 1202 + .../ent/dialect/sql/schema/inspect.go | 95 + .../ent/dialect/sql/schema/migrate.go | 660 + .../entgo.io/ent/dialect/sql/schema/mysql.go | 997 + .../ent/dialect/sql/schema/postgres.go | 851 + .../entgo.io/ent/dialect/sql/schema/schema.go | 690 + .../entgo.io/ent/dialect/sql/schema/sqlite.go | 528 + .../entgo.io/ent/dialect/sql/schema/writer.go | 365 + vendor/entgo.io/ent/dialect/sql/sql.go | 334 + .../entgo.io/ent/dialect/sql/sqlgraph/BUILD | 19 + .../ent/dialect/sql/sqlgraph/entql.go | 334 + .../ent/dialect/sql/sqlgraph/errors.go | 47 + .../ent/dialect/sql/sqlgraph/graph.go | 1964 + vendor/entgo.io/ent/dialect/sql/sqljson/BUILD | 16 + .../ent/dialect/sql/sqljson/dialect.go | 222 + .../ent/dialect/sql/sqljson/sqljson.go | 720 + vendor/entgo.io/ent/ent.go | 537 + vendor/entgo.io/ent/entql/BUILD | 12 + vendor/entgo.io/ent/entql/entql.go | 464 + vendor/entgo.io/ent/entql/types.go | 1980 + vendor/entgo.io/ent/op_string.go | 43 + vendor/entgo.io/ent/schema/BUILD | 9 + vendor/entgo.io/ent/schema/edge/BUILD | 13 + vendor/entgo.io/ent/schema/edge/annotation.go | 49 + vendor/entgo.io/ent/schema/edge/edge.go | 276 + vendor/entgo.io/ent/schema/field/BUILD | 15 + .../entgo.io/ent/schema/field/annotation.go | 81 + vendor/entgo.io/ent/schema/field/field.go | 1457 + vendor/entgo.io/ent/schema/field/numeric.go | 2274 + vendor/entgo.io/ent/schema/field/type.go | 249 + vendor/entgo.io/ent/schema/index/BUILD | 10 + vendor/entgo.io/ent/schema/index/index.go | 121 + vendor/entgo.io/ent/schema/schema.go | 43 + .../AppsFlyer/go-sundheit/.gitignore | 19 + vendor/github.com/AppsFlyer/go-sundheit/BUILD | 20 + .../github.com/AppsFlyer/go-sundheit/LICENSE | 201 + .../github.com/AppsFlyer/go-sundheit/Makefile | 19 + .../AppsFlyer/go-sundheit/README.md | 405 + .../github.com/AppsFlyer/go-sundheit/check.go | 14 + .../AppsFlyer/go-sundheit/check_listener.go | 39 + .../AppsFlyer/go-sundheit/check_task.go | 36 + .../AppsFlyer/go-sundheit/checks/BUILD | 19 + .../AppsFlyer/go-sundheit/checks/custom.go | 31 + .../AppsFlyer/go-sundheit/checks/dns.go | 52 + .../AppsFlyer/go-sundheit/checks/http.go | 144 + .../AppsFlyer/go-sundheit/checks/must.go | 14 + .../AppsFlyer/go-sundheit/checks/ping.go | 51 + .../AppsFlyer/go-sundheit/config.go | 21 + .../AppsFlyer/go-sundheit/health.go | 243 + .../AppsFlyer/go-sundheit/health_listener.go | 13 + .../AppsFlyer/go-sundheit/http/BUILD | 10 + .../AppsFlyer/go-sundheit/http/handler.go | 49 + 
.../AppsFlyer/go-sundheit/options.go | 120 + .../github.com/AppsFlyer/go-sundheit/types.go | 67 + .../github.com/AppsFlyer/go-sundheit/utils.go | 19 + .../github.com/Azure/go-ntlmssp/.travis.yml | 17 + vendor/github.com/Azure/go-ntlmssp/BUILD | 23 + vendor/github.com/Azure/go-ntlmssp/LICENSE | 21 + vendor/github.com/Azure/go-ntlmssp/README.md | 29 + .../github.com/Azure/go-ntlmssp/SECURITY.md | 41 + .../Azure/go-ntlmssp/authenticate_message.go | 187 + .../github.com/Azure/go-ntlmssp/authheader.go | 66 + vendor/github.com/Azure/go-ntlmssp/avids.go | 17 + .../Azure/go-ntlmssp/challenge_message.go | 82 + .../Azure/go-ntlmssp/messageheader.go | 21 + .../Azure/go-ntlmssp/negotiate_flags.go | 52 + .../Azure/go-ntlmssp/negotiate_message.go | 64 + .../github.com/Azure/go-ntlmssp/negotiator.go | 151 + vendor/github.com/Azure/go-ntlmssp/nlmp.go | 51 + vendor/github.com/Azure/go-ntlmssp/unicode.go | 29 + .../github.com/Azure/go-ntlmssp/varfield.go | 40 + vendor/github.com/Azure/go-ntlmssp/version.go | 20 + .../Masterminds/goutils/.travis.yml | 18 + vendor/github.com/Masterminds/goutils/BUILD | 14 + .../Masterminds/goutils/CHANGELOG.md | 8 + .../Masterminds/goutils/LICENSE.txt | 202 + .../github.com/Masterminds/goutils/README.md | 70 + .../Masterminds/goutils/appveyor.yml | 21 + .../goutils/cryptorandomstringutils.go | 230 + .../Masterminds/goutils/randomstringutils.go | 248 + .../Masterminds/goutils/stringutils.go | 240 + .../Masterminds/goutils/wordutils.go | 357 + .../github.com/Masterminds/semver/.travis.yml | 29 + vendor/github.com/Masterminds/semver/BUILD | 14 + .../Masterminds/semver/CHANGELOG.md | 109 + .../github.com/Masterminds/semver/LICENSE.txt | 19 + vendor/github.com/Masterminds/semver/Makefile | 36 + .../github.com/Masterminds/semver/README.md | 194 + .../Masterminds/semver/appveyor.yml | 44 + .../Masterminds/semver/collection.go | 24 + .../Masterminds/semver/constraints.go | 423 + vendor/github.com/Masterminds/semver/doc.go | 115 + .../Masterminds/semver/v3/.gitignore | 1 + .../Masterminds/semver/v3/.golangci.yml | 30 + vendor/github.com/Masterminds/semver/v3/BUILD | 14 + .../Masterminds/semver/v3/CHANGELOG.md | 214 + .../Masterminds/semver/v3/LICENSE.txt | 19 + .../github.com/Masterminds/semver/v3/Makefile | 37 + .../Masterminds/semver/v3/README.md | 244 + .../Masterminds/semver/v3/collection.go | 24 + .../Masterminds/semver/v3/constraints.go | 594 + .../github.com/Masterminds/semver/v3/doc.go | 184 + .../github.com/Masterminds/semver/v3/fuzz.go | 22 + .../Masterminds/semver/v3/version.go | 639 + .../github.com/Masterminds/semver/version.go | 425 + .../Masterminds/semver/version_fuzz.go | 10 + .../Masterminds/sprig/v3/.gitignore | 2 + vendor/github.com/Masterminds/sprig/v3/BUILD | 36 + .../Masterminds/sprig/v3/CHANGELOG.md | 383 + .../Masterminds/sprig/v3/LICENSE.txt | 19 + .../github.com/Masterminds/sprig/v3/Makefile | 9 + .../github.com/Masterminds/sprig/v3/README.md | 100 + .../github.com/Masterminds/sprig/v3/crypto.go | 653 + .../github.com/Masterminds/sprig/v3/date.go | 152 + .../Masterminds/sprig/v3/defaults.go | 163 + .../github.com/Masterminds/sprig/v3/dict.go | 174 + vendor/github.com/Masterminds/sprig/v3/doc.go | 19 + .../Masterminds/sprig/v3/functions.go | 382 + .../github.com/Masterminds/sprig/v3/list.go | 464 + .../Masterminds/sprig/v3/network.go | 12 + .../Masterminds/sprig/v3/numeric.go | 186 + .../Masterminds/sprig/v3/reflect.go | 28 + .../github.com/Masterminds/sprig/v3/regex.go | 83 + .../github.com/Masterminds/sprig/v3/semver.go | 23 + 
.../Masterminds/sprig/v3/strings.go | 236 + vendor/github.com/Masterminds/sprig/v3/url.go | 66 + .../github.com/agext/levenshtein/.gitignore | 2 + .../github.com/agext/levenshtein/.travis.yml | 70 + vendor/github.com/agext/levenshtein/BUILD | 12 + vendor/github.com/agext/levenshtein/DCO | 36 + vendor/github.com/agext/levenshtein/LICENSE | 201 + .../github.com/agext/levenshtein/MAINTAINERS | 1 + vendor/github.com/agext/levenshtein/NOTICE | 5 + vendor/github.com/agext/levenshtein/README.md | 38 + .../agext/levenshtein/levenshtein.go | 290 + vendor/github.com/agext/levenshtein/params.go | 152 + .../apparentlymart/go-textseg/v13/LICENSE | 95 + .../go-textseg/v13/textseg/BUILD | 15 + .../go-textseg/v13/textseg/all_tokens.go | 30 + .../go-textseg/v13/textseg/emoji_table.rl | 525 + .../go-textseg/v13/textseg/generate.go | 8 + .../v13/textseg/grapheme_clusters.go | 4138 + .../v13/textseg/grapheme_clusters.rl | 133 + .../v13/textseg/grapheme_clusters_table.rl | 1609 + .../go-textseg/v13/textseg/tables.go | 5833 + .../go-textseg/v13/textseg/unicode2ragel.rb | 335 + .../go-textseg/v13/textseg/utf8_seqs.go | 19 + vendor/github.com/beevik/etree/BUILD | 13 + vendor/github.com/beevik/etree/CONTRIBUTORS | 12 + vendor/github.com/beevik/etree/LICENSE | 24 + vendor/github.com/beevik/etree/README.md | 204 + .../github.com/beevik/etree/RELEASE_NOTES.md | 153 + vendor/github.com/beevik/etree/etree.go | 1666 + vendor/github.com/beevik/etree/helpers.go | 394 + vendor/github.com/beevik/etree/path.go | 586 + .../github.com/coreos/go-oidc/v3/oidc/BUILD | 2 +- .../github.com/coreos/go-oidc/v3/oidc/jose.go | 1 + .../github.com/coreos/go-oidc/v3/oidc/jwks.go | 52 +- .../github.com/coreos/go-oidc/v3/oidc/oidc.go | 163 +- .../coreos/go-oidc/v3/oidc/verify.go | 100 +- vendor/github.com/coreos/go-semver/LICENSE | 202 + vendor/github.com/coreos/go-semver/NOTICE | 5 + .../github.com/coreos/go-semver/semver/BUILD | 12 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + .../github.com/coreos/go-systemd/v22/LICENSE | 191 + .../github.com/coreos/go-systemd/v22/NOTICE | 5 + .../coreos/go-systemd/v22/journal/BUILD | 13 + .../coreos/go-systemd/v22/journal/journal.go | 46 + .../go-systemd/v22/journal/journal_unix.go | 210 + .../go-systemd/v22/journal/journal_windows.go | 35 + vendor/github.com/dexidp/dex/LICENSE | 202 + vendor/github.com/dexidp/dex/api/v2/BUILD | 19 + vendor/github.com/dexidp/dex/api/v2/LICENSE | 202 + vendor/github.com/dexidp/dex/api/v2/api.pb.go | 2100 + vendor/github.com/dexidp/dex/api/v2/api.proto | 202 + .../dexidp/dex/api/v2/api_grpc.pb.go | 544 + vendor/github.com/dexidp/dex/cmd/dex/BUILD | 47 + .../github.com/dexidp/dex/cmd/dex/config.go | 356 + vendor/github.com/dexidp/dex/cmd/dex/main.go | 28 + vendor/github.com/dexidp/dex/cmd/dex/serve.go | 684 + .../github.com/dexidp/dex/cmd/dex/version.go | 26 + vendor/github.com/dexidp/dex/connector/BUILD | 9 + .../dexidp/dex/connector/atlassiancrowd/BUILD | 14 + .../atlassiancrowd/atlassiancrowd.go | 448 + .../dexidp/dex/connector/authproxy/BUILD | 13 + .../dex/connector/authproxy/authproxy.go | 85 + .../dexidp/dex/connector/bitbucketcloud/BUILD | 16 + .../bitbucketcloud/bitbucketcloud.go | 468 + .../dexidp/dex/connector/connector.go | 105 + .../dexidp/dex/connector/gitea/BUILD | 14 + .../dexidp/dex/connector/gitea/gitea.go | 424 + .../dexidp/dex/connector/github/BUILD | 17 + .../dexidp/dex/connector/github/github.go | 733 + .../dexidp/dex/connector/gitlab/BUILD | 15 + .../dexidp/dex/connector/gitlab/gitlab.go | 310 + 
.../dexidp/dex/connector/google/BUILD | 20 + .../dexidp/dex/connector/google/google.go | 385 + .../dexidp/dex/connector/keystone/BUILD | 13 + .../dexidp/dex/connector/keystone/keystone.go | 312 + .../dexidp/dex/connector/ldap/BUILD | 14 + .../dexidp/dex/connector/ldap/gen-certs.sh | 49 + .../dexidp/dex/connector/ldap/ldap.go | 640 + .../dexidp/dex/connector/linkedin/BUILD | 14 + .../dexidp/dex/connector/linkedin/linkedin.go | 242 + .../dexidp/dex/connector/microsoft/BUILD | 15 + .../dex/connector/microsoft/microsoft.go | 521 + .../dexidp/dex/connector/mock/BUILD | 13 + .../dex/connector/mock/connectortest.go | 124 + .../dexidp/dex/connector/oauth/BUILD | 15 + .../dexidp/dex/connector/oauth/oauth.go | 260 + .../dexidp/dex/connector/oidc/BUILD | 16 + .../dexidp/dex/connector/oidc/oidc.go | 458 + .../dexidp/dex/connector/openshift/BUILD | 17 + .../dex/connector/openshift/openshift.go | 267 + .../dexidp/dex/connector/saml/BUILD | 22 + .../dexidp/dex/connector/saml/saml.go | 644 + .../dexidp/dex/connector/saml/types.go | 277 + vendor/github.com/dexidp/dex/pkg/groups/BUILD | 9 + .../dexidp/dex/pkg/groups/groups.go | 18 + .../dexidp/dex/pkg/httpclient/BUILD | 9 + .../dexidp/dex/pkg/httpclient/httpclient.go | 45 + .../dexidp/dex/pkg/httpclient/readme.md | 44 + vendor/github.com/dexidp/dex/pkg/log/BUILD | 12 + .../dexidp/dex/pkg/log/deprecated.go | 5 + .../github.com/dexidp/dex/pkg/log/logger.go | 18 + vendor/github.com/dexidp/dex/server/BUILD | 53 + vendor/github.com/dexidp/dex/server/api.go | 387 + .../dexidp/dex/server/deviceflowhandlers.go | 444 + vendor/github.com/dexidp/dex/server/doc.go | 2 + .../github.com/dexidp/dex/server/handlers.go | 1469 + .../dexidp/dex/server/internal/BUILD | 20 + .../dexidp/dex/server/internal/codec.go | 25 + .../dexidp/dex/server/internal/types.pb.go | 232 + .../dexidp/dex/server/internal/types.proto | 19 + vendor/github.com/dexidp/dex/server/oauth2.go | 702 + .../dexidp/dex/server/refreshhandlers.go | 387 + .../github.com/dexidp/dex/server/rotation.go | 249 + vendor/github.com/dexidp/dex/server/server.go | 657 + .../github.com/dexidp/dex/server/templates.go | 370 + vendor/github.com/dexidp/dex/storage/BUILD | 18 + vendor/github.com/dexidp/dex/storage/doc.go | 2 + .../github.com/dexidp/dex/storage/ent/BUILD | 26 + .../dexidp/dex/storage/ent/client/BUILD | 34 + .../dexidp/dex/storage/ent/client/authcode.go | 52 + .../dex/storage/ent/client/authrequest.go | 109 + .../dexidp/dex/storage/ent/client/client.go | 92 + .../dex/storage/ent/client/connector.go | 88 + .../dex/storage/ent/client/devicerequest.go | 36 + .../dex/storage/ent/client/devicetoken.go | 80 + .../dexidp/dex/storage/ent/client/keys.go | 81 + .../dexidp/dex/storage/ent/client/main.go | 110 + .../dex/storage/ent/client/offlinesession.go | 93 + .../dexidp/dex/storage/ent/client/password.go | 100 + .../dex/storage/ent/client/refreshtoken.go | 111 + .../dexidp/dex/storage/ent/client/types.go | 173 + .../dexidp/dex/storage/ent/client/utils.go | 44 + .../dexidp/dex/storage/ent/db/BUILD | 88 + .../dexidp/dex/storage/ent/db/authcode.go | 269 + .../dexidp/dex/storage/ent/db/authcode/BUILD | 16 + .../dex/storage/ent/db/authcode/authcode.go | 169 + .../dex/storage/ent/db/authcode/where.go | 932 + .../dex/storage/ent/db/authcode_create.go | 446 + .../dex/storage/ent/db/authcode_delete.go | 88 + .../dex/storage/ent/db/authcode_query.go | 526 + .../dex/storage/ent/db/authcode_update.go | 679 + .../dexidp/dex/storage/ent/db/authrequest.go | 326 + .../dex/storage/ent/db/authrequest/BUILD | 16 + 
.../storage/ent/db/authrequest/authrequest.go | 185 + .../dex/storage/ent/db/authrequest/where.go | 1087 + .../dex/storage/ent/db/authrequest_create.go | 473 + .../dex/storage/ent/db/authrequest_delete.go | 88 + .../dex/storage/ent/db/authrequest_query.go | 526 + .../dex/storage/ent/db/authrequest_update.go | 723 + .../dexidp/dex/storage/ent/db/client.go | 1462 + .../dexidp/dex/storage/ent/db/connector.go | 136 + .../dexidp/dex/storage/ent/db/connector/BUILD | 16 + .../dex/storage/ent/db/connector/connector.go | 75 + .../dex/storage/ent/db/connector/where.go | 350 + .../dex/storage/ent/db/connector_create.go | 244 + .../dex/storage/ent/db/connector_delete.go | 88 + .../dex/storage/ent/db/connector_query.go | 526 + .../dex/storage/ent/db/connector_update.go | 283 + .../dex/storage/ent/db/devicerequest.go | 166 + .../dex/storage/ent/db/devicerequest/BUILD | 16 + .../ent/db/devicerequest/devicerequest.go | 93 + .../dex/storage/ent/db/devicerequest/where.go | 422 + .../storage/ent/db/devicerequest_create.go | 262 + .../storage/ent/db/devicerequest_delete.go | 88 + .../dex/storage/ent/db/devicerequest_query.go | 526 + .../storage/ent/db/devicerequest_update.go | 381 + .../dexidp/dex/storage/ent/db/devicetoken.go | 187 + .../dex/storage/ent/db/devicetoken/BUILD | 16 + .../storage/ent/db/devicetoken/devicetoken.go | 109 + .../dex/storage/ent/db/devicetoken/where.go | 557 + .../dex/storage/ent/db/devicetoken_create.go | 308 + .../dex/storage/ent/db/devicetoken_delete.go | 88 + .../dex/storage/ent/db/devicetoken_query.go | 526 + .../dex/storage/ent/db/devicetoken_update.go | 426 + .../dexidp/dex/storage/ent/db/ent.go | 626 + .../dexidp/dex/storage/ent/db/keys.go | 148 + .../dexidp/dex/storage/ent/db/keys/BUILD | 16 + .../dexidp/dex/storage/ent/db/keys/keys.go | 61 + .../dexidp/dex/storage/ent/db/keys/where.go | 142 + .../dexidp/dex/storage/ent/db/keys_create.go | 237 + .../dexidp/dex/storage/ent/db/keys_delete.go | 88 + .../dexidp/dex/storage/ent/db/keys_query.go | 526 + .../dexidp/dex/storage/ent/db/keys_update.go | 273 + .../dexidp/dex/storage/ent/db/migrate/BUILD | 17 + .../dex/storage/ent/db/migrate/migrate.go | 64 + .../dex/storage/ent/db/migrate/schema.go | 213 + .../dexidp/dex/storage/ent/db/mutation.go | 7982 + .../dexidp/dex/storage/ent/db/oauth2client.go | 165 + .../dex/storage/ent/db/oauth2client/BUILD | 16 + .../ent/db/oauth2client/oauth2client.go | 88 + .../dex/storage/ent/db/oauth2client/where.go | 340 + .../dex/storage/ent/db/oauth2client_create.go | 269 + .../dex/storage/ent/db/oauth2client_delete.go | 88 + .../dex/storage/ent/db/oauth2client_query.go | 526 + .../dex/storage/ent/db/oauth2client_update.go | 410 + .../dex/storage/ent/db/offlinesession.go | 138 + .../dex/storage/ent/db/offlinesession/BUILD | 16 + .../ent/db/offlinesession/offlinesession.go | 70 + .../storage/ent/db/offlinesession/where.go | 335 + .../storage/ent/db/offlinesession_create.go | 241 + .../storage/ent/db/offlinesession_delete.go | 88 + .../storage/ent/db/offlinesession_query.go | 526 + .../storage/ent/db/offlinesession_update.go | 301 + .../dexidp/dex/storage/ent/db/password.go | 138 + .../dexidp/dex/storage/ent/db/password/BUILD | 16 + .../dex/storage/ent/db/password/password.go | 75 + .../dex/storage/ent/db/password/where.go | 340 + .../dex/storage/ent/db/password_create.go | 233 + .../dex/storage/ent/db/password_delete.go | 88 + .../dex/storage/ent/db/password_query.go | 526 + .../dex/storage/ent/db/password_update.go | 293 + .../dexidp/dex/storage/ent/db/predicate/BUILD | 10 + 
.../dex/storage/ent/db/predicate/predicate.go | 37 + .../dexidp/dex/storage/ent/db/refreshtoken.go | 269 + .../dex/storage/ent/db/refreshtoken/BUILD | 16 + .../ent/db/refreshtoken/refreshtoken.go | 173 + .../dex/storage/ent/db/refreshtoken/where.go | 907 + .../dex/storage/ent/db/refreshtoken_create.go | 465 + .../dex/storage/ent/db/refreshtoken_delete.go | 88 + .../dex/storage/ent/db/refreshtoken_query.go | 526 + .../dex/storage/ent/db/refreshtoken_update.go | 701 + .../dexidp/dex/storage/ent/db/runtime.go | 269 + .../dexidp/dex/storage/ent/db/tx.go | 237 + .../dexidp/dex/storage/ent/generate.go | 3 + .../dexidp/dex/storage/ent/mysql.go | 158 + .../dexidp/dex/storage/ent/postgres.go | 154 + .../dexidp/dex/storage/ent/schema/BUILD | 28 + .../dexidp/dex/storage/ent/schema/authcode.go | 90 + .../dex/storage/ent/schema/authrequest.go | 97 + .../dexidp/dex/storage/ent/schema/client.go | 54 + .../dex/storage/ent/schema/connector.go | 47 + .../dex/storage/ent/schema/devicerequest.go | 51 + .../dex/storage/ent/schema/devicetoken.go | 55 + .../dexidp/dex/storage/ent/schema/dialects.go | 21 + .../dexidp/dex/storage/ent/schema/keys.go | 45 + .../dex/storage/ent/schema/offlinesession.go | 46 + .../dexidp/dex/storage/ent/schema/password.go | 44 + .../dex/storage/ent/schema/refreshtoken.go | 95 + .../dexidp/dex/storage/ent/sqlite.go | 61 + .../dexidp/dex/storage/ent/types.go | 25 + .../dexidp/dex/storage/ent/utils.go | 10 + .../github.com/dexidp/dex/storage/etcd/BUILD | 21 + .../dexidp/dex/storage/etcd/config.go | 91 + .../dexidp/dex/storage/etcd/etcd.go | 646 + .../dexidp/dex/storage/etcd/types.go | 318 + .../github.com/dexidp/dex/storage/health.go | 32 + .../dexidp/dex/storage/kubernetes/BUILD | 25 + .../dexidp/dex/storage/kubernetes/client.go | 597 + .../dexidp/dex/storage/kubernetes/doc.go | 2 + .../dex/storage/kubernetes/k8sapi/BUILD | 17 + .../dex/storage/kubernetes/k8sapi/client.go | 147 + .../kubernetes/k8sapi/crd_extensions.go | 176 + .../dex/storage/kubernetes/k8sapi/doc.go | 2 + .../storage/kubernetes/k8sapi/extensions.go | 23 + .../dex/storage/kubernetes/k8sapi/time.go | 138 + .../storage/kubernetes/k8sapi/unversioned.go | 52 + .../dex/storage/kubernetes/k8sapi/v1.go | 162 + .../dexidp/dex/storage/kubernetes/lock.go | 124 + .../dexidp/dex/storage/kubernetes/storage.go | 769 + .../dex/storage/kubernetes/transport.go | 123 + .../dexidp/dex/storage/kubernetes/types.go | 854 + .../dexidp/dex/storage/memory/BUILD | 13 + .../dexidp/dex/storage/memory/memory.go | 540 + .../github.com/dexidp/dex/storage/sql/BUILD | 22 + .../dexidp/dex/storage/sql/config.go | 344 + .../github.com/dexidp/dex/storage/sql/crud.go | 1020 + .../dexidp/dex/storage/sql/migrate.go | 301 + .../github.com/dexidp/dex/storage/sql/sql.go | 198 + .../dexidp/dex/storage/sql/sqlite.go | 53 + .../github.com/dexidp/dex/storage/static.go | 232 + .../github.com/dexidp/dex/storage/storage.go | 444 + vendor/github.com/dexidp/dex/web/BUILD | 41 + vendor/github.com/dexidp/dex/web/robots.txt | 2 + .../web/static/img/atlassian-crowd-icon.svg | 17 + .../dex/web/static/img/bitbucket-icon.svg | 5 + .../dexidp/dex/web/static/img/email-icon.svg | 12 + .../dexidp/dex/web/static/img/gitea-icon.svg | 1 + .../dexidp/dex/web/static/img/github-icon.svg | 5 + .../dexidp/dex/web/static/img/gitlab-icon.svg | 53 + .../dexidp/dex/web/static/img/google-icon.svg | 16 + .../dex/web/static/img/keystone-icon.svg | 12 + .../dexidp/dex/web/static/img/ldap-icon.svg | 12 + .../dex/web/static/img/linkedin-icon.svg | 1 + .../dex/web/static/img/microsoft-icon.svg | 9 + 
.../dexidp/dex/web/static/img/oidc-icon.svg | 156 + .../dexidp/dex/web/static/img/saml-icon.svg | 12 + .../github.com/dexidp/dex/web/static/main.css | 148 + .../dexidp/dex/web/templates/approval.html | 44 + .../dexidp/dex/web/templates/device.html | 23 + .../dex/web/templates/device_success.html | 8 + .../dexidp/dex/web/templates/error.html | 8 + .../dexidp/dex/web/templates/footer.html | 3 + .../dexidp/dex/web/templates/header.html | 20 + .../dexidp/dex/web/templates/login.html | 19 + .../dexidp/dex/web/templates/oob.html | 9 + .../dexidp/dex/web/templates/password.html | 35 + .../dexidp/dex/web/themes/dark/favicon.png | Bin 0 -> 10415 bytes .../dexidp/dex/web/themes/dark/logo.png | Bin 0 -> 24387 bytes .../dexidp/dex/web/themes/dark/styles.css | 122 + .../dexidp/dex/web/themes/light/favicon.png | Bin 0 -> 10415 bytes .../dexidp/dex/web/themes/light/logo.png | Bin 0 -> 24829 bytes .../dexidp/dex/web/themes/light/styles.css | 113 + vendor/github.com/dexidp/dex/web/web.go | 14 + .../github.com/felixge/httpsnoop/.gitignore | 0 .../github.com/felixge/httpsnoop/.travis.yml | 6 + vendor/github.com/felixge/httpsnoop/BUILD | 14 + .../github.com/felixge/httpsnoop/LICENSE.txt | 19 + vendor/github.com/felixge/httpsnoop/Makefile | 10 + vendor/github.com/felixge/httpsnoop/README.md | 95 + .../felixge/httpsnoop/capture_metrics.go | 86 + vendor/github.com/felixge/httpsnoop/docs.go | 10 + .../httpsnoop/wrap_generated_gteq_1.8.go | 436 + .../httpsnoop/wrap_generated_lt_1.8.go | 278 + vendor/github.com/ghodss/yaml/.gitignore | 20 + vendor/github.com/ghodss/yaml/.travis.yml | 7 + vendor/github.com/ghodss/yaml/BUILD | 13 + vendor/github.com/ghodss/yaml/LICENSE | 50 + vendor/github.com/ghodss/yaml/README.md | 121 + vendor/github.com/ghodss/yaml/fields.go | 501 + vendor/github.com/ghodss/yaml/yaml.go | 277 + vendor/github.com/go-asn1-ber/asn1-ber/BUILD | 18 + .../github.com/go-asn1-ber/asn1-ber/LICENSE | 22 + .../github.com/go-asn1-ber/asn1-ber/README.md | 24 + vendor/github.com/go-asn1-ber/asn1-ber/ber.go | 620 + .../go-asn1-ber/asn1-ber/content_int.go | 25 + .../go-asn1-ber/asn1-ber/generalizedTime.go | 105 + .../github.com/go-asn1-ber/asn1-ber/header.go | 38 + .../go-asn1-ber/asn1-ber/identifier.go | 112 + .../github.com/go-asn1-ber/asn1-ber/length.go | 81 + .../github.com/go-asn1-ber/asn1-ber/real.go | 157 + .../github.com/go-asn1-ber/asn1-ber/util.go | 24 + .../github.com/go-jose/go-jose/v3/.gitignore | 2 + .../go-jose/go-jose/v3/.golangci.yml | 53 + .../github.com/go-jose/go-jose/v3/.travis.yml | 33 + .../go-jose/go-jose/v3/BUG-BOUNTY.md | 10 + vendor/github.com/go-jose/go-jose/v3/BUILD | 26 + .../go-jose/go-jose/v3/CONTRIBUTING.md | 15 + vendor/github.com/go-jose/go-jose/v3/LICENSE | 202 + .../github.com/go-jose/go-jose/v3/README.md | 122 + .../go-jose/go-jose/v3/asymmetric.go | 592 + .../go-jose/go-jose/v3/cipher/BUILD | 14 + .../go-jose/go-jose/v3/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose/v3/cipher/concat_kdf.go | 75 + .../go-jose/go-jose/v3/cipher/ecdh_es.go | 86 + .../go-jose/go-jose/v3/cipher/key_wrap.go | 109 + .../github.com/go-jose/go-jose/v3/crypter.go | 544 + vendor/github.com/go-jose/go-jose/v3/doc.go | 27 + .../github.com/go-jose/go-jose/v3/encoding.go | 191 + .../github.com/go-jose/go-jose/v3/json/BUILD | 16 + .../go-jose/go-jose/v3/json/LICENSE | 27 + .../go-jose/go-jose/v3/json/README.md | 13 + .../go-jose/go-jose/v3/json/decode.go | 1217 + .../go-jose/go-jose/v3/json/encode.go | 1197 + .../go-jose/go-jose/v3/json/indent.go | 141 + .../go-jose/go-jose/v3/json/scanner.go | 623 + 
.../go-jose/go-jose/v3/json/stream.go | 485 + .../go-jose/go-jose/v3/json/tags.go | 44 + vendor/github.com/go-jose/go-jose/v3/jwe.go | 295 + vendor/github.com/go-jose/go-jose/v3/jwk.go | 798 + vendor/github.com/go-jose/go-jose/v3/jws.go | 366 + .../github.com/go-jose/go-jose/v3/opaque.go | 144 + .../github.com/go-jose/go-jose/v3/shared.go | 520 + .../github.com/go-jose/go-jose/v3/signing.go | 450 + .../go-jose/go-jose/v3/symmetric.go | 495 + vendor/github.com/go-ldap/ldap/v3/BUILD | 34 + vendor/github.com/go-ldap/ldap/v3/LICENSE | 22 + vendor/github.com/go-ldap/ldap/v3/add.go | 89 + vendor/github.com/go-ldap/ldap/v3/bind.go | 735 + vendor/github.com/go-ldap/ldap/v3/client.go | 37 + vendor/github.com/go-ldap/ldap/v3/compare.go | 62 + vendor/github.com/go-ldap/ldap/v3/conn.go | 622 + vendor/github.com/go-ldap/ldap/v3/control.go | 852 + vendor/github.com/go-ldap/ldap/v3/debug.go | 28 + vendor/github.com/go-ldap/ldap/v3/del.go | 59 + vendor/github.com/go-ldap/ldap/v3/dn.go | 350 + vendor/github.com/go-ldap/ldap/v3/doc.go | 4 + vendor/github.com/go-ldap/ldap/v3/error.go | 253 + vendor/github.com/go-ldap/ldap/v3/filter.go | 486 + vendor/github.com/go-ldap/ldap/v3/ldap.go | 388 + vendor/github.com/go-ldap/ldap/v3/moddn.go | 102 + vendor/github.com/go-ldap/ldap/v3/modify.go | 181 + .../go-ldap/ldap/v3/passwdmodify.go | 119 + vendor/github.com/go-ldap/ldap/v3/request.go | 110 + vendor/github.com/go-ldap/ldap/v3/search.go | 640 + vendor/github.com/go-ldap/ldap/v3/unbind.go | 38 + vendor/github.com/go-ldap/ldap/v3/whoami.go | 91 + .../github.com/go-openapi/inflect/.hgignore | 1 + vendor/github.com/go-openapi/inflect/BUILD | 9 + vendor/github.com/go-openapi/inflect/LICENCE | 7 + vendor/github.com/go-openapi/inflect/README | 168 + .../github.com/go-openapi/inflect/inflect.go | 713 + .../github.com/go-sql-driver/mysql/.gitignore | 9 + vendor/github.com/go-sql-driver/mysql/AUTHORS | 126 + vendor/github.com/go-sql-driver/mysql/BUILD | 32 + .../go-sql-driver/mysql/CHANGELOG.md | 266 + vendor/github.com/go-sql-driver/mysql/LICENSE | 373 + .../github.com/go-sql-driver/mysql/README.md | 531 + .../go-sql-driver/mysql/atomic_bool.go | 19 + .../go-sql-driver/mysql/atomic_bool_go118.go | 47 + vendor/github.com/go-sql-driver/mysql/auth.go | 437 + .../github.com/go-sql-driver/mysql/buffer.go | 182 + .../go-sql-driver/mysql/collations.go | 266 + .../go-sql-driver/mysql/conncheck.go | 55 + .../go-sql-driver/mysql/conncheck_dummy.go | 18 + .../go-sql-driver/mysql/connection.go | 650 + .../go-sql-driver/mysql/connector.go | 146 + .../github.com/go-sql-driver/mysql/const.go | 174 + .../github.com/go-sql-driver/mysql/driver.go | 107 + vendor/github.com/go-sql-driver/mysql/dsn.go | 577 + .../github.com/go-sql-driver/mysql/errors.go | 77 + .../github.com/go-sql-driver/mysql/fields.go | 206 + vendor/github.com/go-sql-driver/mysql/fuzz.go | 25 + .../github.com/go-sql-driver/mysql/infile.go | 182 + .../go-sql-driver/mysql/nulltime.go | 71 + .../github.com/go-sql-driver/mysql/packets.go | 1349 + .../github.com/go-sql-driver/mysql/result.go | 22 + vendor/github.com/go-sql-driver/mysql/rows.go | 223 + .../go-sql-driver/mysql/statement.go | 220 + .../go-sql-driver/mysql/transaction.go | 31 + .../github.com/go-sql-driver/mysql/utils.go | 834 + .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 8 + .../googleapis/gax-go/v2/callctx/callctx.go | 74 + .../github.com/googleapis/gax-go/v2/header.go | 49 + .../googleapis/gax-go/v2/internal/version.go | 2 +- vendor/github.com/gorilla/handlers/BUILD | 19 +
vendor/github.com/gorilla/handlers/LICENSE | 22 + vendor/github.com/gorilla/handlers/README.md | 56 + .../github.com/gorilla/handlers/canonical.go | 74 + .../github.com/gorilla/handlers/compress.go | 143 + vendor/github.com/gorilla/handlers/cors.go | 355 + vendor/github.com/gorilla/handlers/doc.go | 9 + .../github.com/gorilla/handlers/handlers.go | 147 + vendor/github.com/gorilla/handlers/logging.go | 244 + .../gorilla/handlers/proxy_headers.go | 120 + .../github.com/gorilla/handlers/recovery.go | 96 + vendor/github.com/gorilla/mux/AUTHORS | 8 + vendor/github.com/gorilla/mux/BUILD | 16 + vendor/github.com/gorilla/mux/LICENSE | 27 + vendor/github.com/gorilla/mux/README.md | 805 + vendor/github.com/gorilla/mux/doc.go | 306 + vendor/github.com/gorilla/mux/middleware.go | 74 + vendor/github.com/gorilla/mux/mux.go | 606 + vendor/github.com/gorilla/mux/regexp.go | 388 + vendor/github.com/gorilla/mux/route.go | 736 + vendor/github.com/gorilla/mux/test_helpers.go | 19 + vendor/github.com/hashicorp/hcl/v2/BUILD | 38 + .../github.com/hashicorp/hcl/v2/CHANGELOG.md | 212 + vendor/github.com/hashicorp/hcl/v2/LICENSE | 353 + vendor/github.com/hashicorp/hcl/v2/README.md | 219 + .../github.com/hashicorp/hcl/v2/diagnostic.go | 186 + .../hashicorp/hcl/v2/diagnostic_text.go | 311 + .../hashicorp/hcl/v2/diagnostic_typeparams.go | 39 + .../github.com/hashicorp/hcl/v2/didyoumean.go | 24 + vendor/github.com/hashicorp/hcl/v2/doc.go | 34 + .../hashicorp/hcl/v2/eval_context.go | 25 + .../github.com/hashicorp/hcl/v2/expr_call.go | 46 + .../github.com/hashicorp/hcl/v2/expr_list.go | 37 + .../github.com/hashicorp/hcl/v2/expr_map.go | 44 + .../hashicorp/hcl/v2/expr_unwrap.go | 68 + .../hashicorp/hcl/v2/ext/customdecode/BUILD | 16 + .../hcl/v2/ext/customdecode/README.md | 209 + .../hcl/v2/ext/customdecode/customdecode.go | 56 + .../v2/ext/customdecode/expression_type.go | 146 + .../hashicorp/hcl/v2/ext/tryfunc/BUILD | 15 + .../hashicorp/hcl/v2/ext/tryfunc/README.md | 44 + .../hashicorp/hcl/v2/ext/tryfunc/tryfunc.go | 150 + .../github.com/hashicorp/hcl/v2/gohcl/BUILD | 22 + .../hashicorp/hcl/v2/gohcl/decode.go | 320 + .../github.com/hashicorp/hcl/v2/gohcl/doc.go | 62 + .../hashicorp/hcl/v2/gohcl/encode.go | 191 + .../hashicorp/hcl/v2/gohcl/schema.go | 181 + .../hashicorp/hcl/v2/gohcl/types.go | 16 + .../hashicorp/hcl/v2/hclparse/BUILD | 14 + .../hashicorp/hcl/v2/hclparse/parser.go | 135 + .../hashicorp/hcl/v2/hclsyntax/BUILD | 45 + .../hashicorp/hcl/v2/hclsyntax/diagnostics.go | 23 + .../hashicorp/hcl/v2/hclsyntax/didyoumean.go | 24 + .../hashicorp/hcl/v2/hclsyntax/doc.go | 7 + .../hashicorp/hcl/v2/hclsyntax/expression.go | 1814 + .../hcl/v2/hclsyntax/expression_ops.go | 268 + .../hcl/v2/hclsyntax/expression_template.go | 239 + .../hcl/v2/hclsyntax/expression_vars.go | 76 + .../hashicorp/hcl/v2/hclsyntax/file.go | 20 + .../hashicorp/hcl/v2/hclsyntax/generate.go | 9 + .../hashicorp/hcl/v2/hclsyntax/keywords.go | 21 + .../hashicorp/hcl/v2/hclsyntax/navigation.go | 59 + .../hashicorp/hcl/v2/hclsyntax/node.go | 22 + .../hashicorp/hcl/v2/hclsyntax/parser.go | 2147 + .../hcl/v2/hclsyntax/parser_template.go | 830 + .../hcl/v2/hclsyntax/parser_traversal.go | 159 + .../hashicorp/hcl/v2/hclsyntax/peeker.go | 212 + .../hashicorp/hcl/v2/hclsyntax/public.go | 171 + .../hcl/v2/hclsyntax/scan_string_lit.go | 301 + .../hcl/v2/hclsyntax/scan_string_lit.rl | 105 + .../hashicorp/hcl/v2/hclsyntax/scan_tokens.go | 5265 + .../hashicorp/hcl/v2/hclsyntax/scan_tokens.rl | 395 + .../hashicorp/hcl/v2/hclsyntax/spec.md | 943 +
.../hashicorp/hcl/v2/hclsyntax/structure.go | 393 + .../hcl/v2/hclsyntax/structure_at_pos.go | 118 + .../hashicorp/hcl/v2/hclsyntax/token.go | 333 + .../hcl/v2/hclsyntax/token_type_string.go | 131 + .../hcl/v2/hclsyntax/unicode2ragel.rb | 335 + .../hcl/v2/hclsyntax/unicode_derived.rl | 2135 + .../hashicorp/hcl/v2/hclsyntax/variables.go | 86 + .../hashicorp/hcl/v2/hclsyntax/walk.go | 41 + .../hashicorp/hcl/v2/hclwrite/BUILD | 30 + .../hashicorp/hcl/v2/hclwrite/ast.go | 121 + .../hcl/v2/hclwrite/ast_attribute.go | 48 + .../hashicorp/hcl/v2/hclwrite/ast_block.go | 177 + .../hashicorp/hcl/v2/hclwrite/ast_body.go | 239 + .../hcl/v2/hclwrite/ast_expression.go | 224 + .../hashicorp/hcl/v2/hclwrite/doc.go | 11 + .../hashicorp/hcl/v2/hclwrite/format.go | 467 + .../hashicorp/hcl/v2/hclwrite/generate.go | 396 + .../hcl/v2/hclwrite/native_node_sorter.go | 23 + .../hashicorp/hcl/v2/hclwrite/node.go | 296 + .../hashicorp/hcl/v2/hclwrite/parser.go | 638 + .../hashicorp/hcl/v2/hclwrite/public.go | 44 + .../hashicorp/hcl/v2/hclwrite/tokens.go | 132 + vendor/github.com/hashicorp/hcl/v2/json/BUILD | 29 + .../github.com/hashicorp/hcl/v2/json/ast.go | 121 + .../hashicorp/hcl/v2/json/didyoumean.go | 33 + .../github.com/hashicorp/hcl/v2/json/doc.go | 12 + vendor/github.com/hashicorp/hcl/v2/json/is.go | 54 + .../hashicorp/hcl/v2/json/navigation.go | 70 + .../hashicorp/hcl/v2/json/parser.go | 504 + .../hashicorp/hcl/v2/json/peeker.go | 25 + .../hashicorp/hcl/v2/json/public.go | 117 + .../hashicorp/hcl/v2/json/scanner.go | 306 + .../github.com/hashicorp/hcl/v2/json/spec.md | 405 + .../hashicorp/hcl/v2/json/structure.go | 637 + .../hashicorp/hcl/v2/json/tokentype_string.go | 29 + vendor/github.com/hashicorp/hcl/v2/merged.go | 226 + vendor/github.com/hashicorp/hcl/v2/ops.go | 432 + vendor/github.com/hashicorp/hcl/v2/pos.go | 275 + .../hashicorp/hcl/v2/pos_scanner.go | 152 + vendor/github.com/hashicorp/hcl/v2/schema.go | 21 + vendor/github.com/hashicorp/hcl/v2/spec.md | 691 + .../hashicorp/hcl/v2/static_expr.go | 40 + .../github.com/hashicorp/hcl/v2/structure.go | 151 + .../hashicorp/hcl/v2/structure_at_pos.go | 117 + .../github.com/hashicorp/hcl/v2/traversal.go | 293 + .../hashicorp/hcl/v2/traversal_for_expr.go | 124 + vendor/github.com/huandu/xstrings/.gitignore | 24 + vendor/github.com/huandu/xstrings/BUILD | 19 + .../huandu/xstrings/CONTRIBUTING.md | 23 + vendor/github.com/huandu/xstrings/LICENSE | 22 + vendor/github.com/huandu/xstrings/README.md | 117 + vendor/github.com/huandu/xstrings/common.go | 21 + vendor/github.com/huandu/xstrings/convert.go | 590 + vendor/github.com/huandu/xstrings/count.go | 120 + vendor/github.com/huandu/xstrings/doc.go | 8 + vendor/github.com/huandu/xstrings/format.go | 169 + .../github.com/huandu/xstrings/manipulate.go | 216 + .../huandu/xstrings/stringbuilder.go | 7 + .../huandu/xstrings/stringbuilder_go110.go | 9 + .../github.com/huandu/xstrings/translate.go | 546 + .../github.com/imdario/mergo/.deepsource.toml | 12 + vendor/github.com/imdario/mergo/.gitignore | 33 + vendor/github.com/imdario/mergo/.travis.yml | 9 + vendor/github.com/imdario/mergo/BUILD | 14 + .../imdario/mergo/CODE_OF_CONDUCT.md | 46 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 247 + vendor/github.com/imdario/mergo/doc.go | 143 + vendor/github.com/imdario/mergo/map.go | 178 + vendor/github.com/imdario/mergo/merge.go | 375 + vendor/github.com/imdario/mergo/mergo.go | 78 + .../inconshreveable/mousetrap/BUILD | 12 + .../inconshreveable/mousetrap/LICENSE | 201 + 
.../inconshreveable/mousetrap/README.md | 23 + .../inconshreveable/mousetrap/trap_others.go | 16 + .../inconshreveable/mousetrap/trap_windows.go | 42 + .../mattermost/xml-roundtrip-validator/BUILD | 9 + .../xml-roundtrip-validator/LICENSE.txt | 201 + .../xml-roundtrip-validator/README.md | 73 + .../xml-roundtrip-validator/SECURITY.md | 25 + .../xml-roundtrip-validator/validator.go | 292 + .../github.com/mattn/go-sqlite3/.codecov.yml | 4 + vendor/github.com/mattn/go-sqlite3/.gitignore | 14 + vendor/github.com/mattn/go-sqlite3/BUILD | 126 + vendor/github.com/mattn/go-sqlite3/LICENSE | 21 + vendor/github.com/mattn/go-sqlite3/README.md | 603 + vendor/github.com/mattn/go-sqlite3/backup.go | 85 + .../github.com/mattn/go-sqlite3/callback.go | 411 + vendor/github.com/mattn/go-sqlite3/convert.go | 299 + vendor/github.com/mattn/go-sqlite3/doc.go | 135 + vendor/github.com/mattn/go-sqlite3/error.go | 150 + .../mattn/go-sqlite3/sqlite3-binding.c | 247989 +++++++++++++++ .../mattn/go-sqlite3/sqlite3-binding.h | 13169 + vendor/github.com/mattn/go-sqlite3/sqlite3.go | 2262 + .../mattn/go-sqlite3/sqlite3_context.go | 103 + .../mattn/go-sqlite3/sqlite3_func_crypt.go | 120 + .../mattn/go-sqlite3/sqlite3_go18.go | 54 + .../mattn/go-sqlite3/sqlite3_libsqlite3.go | 21 + .../go-sqlite3/sqlite3_load_extension.go | 84 + .../go-sqlite3/sqlite3_load_extension_omit.go | 24 + .../sqlite3_opt_allow_uri_authority.go | 15 + .../mattn/go-sqlite3/sqlite3_opt_app_armor.go | 16 + .../go-sqlite3/sqlite3_opt_column_metadata.go | 21 + .../go-sqlite3/sqlite3_opt_foreign_keys.go | 15 + .../mattn/go-sqlite3/sqlite3_opt_fts5.go | 14 + .../mattn/go-sqlite3/sqlite3_opt_icu.go | 19 + .../go-sqlite3/sqlite3_opt_introspect.go | 15 + .../go-sqlite3/sqlite3_opt_math_functions.go | 14 + .../mattn/go-sqlite3/sqlite3_opt_os_trace.go | 15 + .../mattn/go-sqlite3/sqlite3_opt_preupdate.go | 20 + .../go-sqlite3/sqlite3_opt_preupdate_hook.go | 112 + .../go-sqlite3/sqlite3_opt_preupdate_omit.go | 21 + .../go-sqlite3/sqlite3_opt_secure_delete.go | 15 + .../sqlite3_opt_secure_delete_fast.go | 15 + .../mattn/go-sqlite3/sqlite3_opt_serialize.go | 82 + .../go-sqlite3/sqlite3_opt_serialize_omit.go | 20 + .../mattn/go-sqlite3/sqlite3_opt_stat4.go | 15 + .../go-sqlite3/sqlite3_opt_unlock_notify.c | 85 + .../go-sqlite3/sqlite3_opt_unlock_notify.go | 93 + .../mattn/go-sqlite3/sqlite3_opt_userauth.go | 289 + .../go-sqlite3/sqlite3_opt_userauth_omit.go | 152 + .../go-sqlite3/sqlite3_opt_vacuum_full.go | 15 + .../go-sqlite3/sqlite3_opt_vacuum_incr.go | 15 + .../mattn/go-sqlite3/sqlite3_opt_vtable.go | 720 + .../mattn/go-sqlite3/sqlite3_other.go | 17 + .../mattn/go-sqlite3/sqlite3_solaris.go | 14 + .../mattn/go-sqlite3/sqlite3_trace.go | 287 + .../mattn/go-sqlite3/sqlite3_type.go | 108 + .../go-sqlite3/sqlite3_usleep_windows.go | 41 + .../mattn/go-sqlite3/sqlite3_windows.go | 17 + .../github.com/mattn/go-sqlite3/sqlite3ext.h | 718 + .../mattn/go-sqlite3/static_mock.go | 37 + .../mitchellh/copystructure/.travis.yml | 12 + .../github.com/mitchellh/copystructure/BUILD | 13 + .../mitchellh/copystructure/LICENSE | 21 + .../mitchellh/copystructure/README.md | 21 + .../mitchellh/copystructure/copier_time.go | 15 + .../mitchellh/copystructure/copystructure.go | 548 + vendor/github.com/mitchellh/go-wordwrap/BUILD | 9 + .../mitchellh/go-wordwrap/LICENSE.md | 21 + .../mitchellh/go-wordwrap/README.md | 39 + .../mitchellh/go-wordwrap/wordwrap.go | 73 + .../mitchellh/reflectwalk/.travis.yml | 1 + vendor/github.com/mitchellh/reflectwalk/BUILD | 13 + 
.../github.com/mitchellh/reflectwalk/LICENSE | 21 + .../mitchellh/reflectwalk/README.md | 6 + .../mitchellh/reflectwalk/location.go | 19 + .../mitchellh/reflectwalk/location_string.go | 16 + .../mitchellh/reflectwalk/reflectwalk.go | 401 + vendor/github.com/oklog/run/.gitignore | 14 + vendor/github.com/oklog/run/BUILD | 12 + vendor/github.com/oklog/run/LICENSE | 201 + vendor/github.com/oklog/run/README.md | 75 + vendor/github.com/oklog/run/actors.go | 38 + vendor/github.com/oklog/run/group.go | 62 + .../client_golang/prometheus/collectors/BUILD | 20 + .../prometheus/collectors/collectors.go | 40 + .../collectors/dbstats_collector.go | 119 + .../prometheus/collectors/expvar_collector.go | 57 + .../collectors/go_collector_go116.go | 49 + .../collectors/go_collector_latest.go | 162 + .../collectors/process_collector.go | 56 + .../russellhaering/goxmldsig/.gitignore | 1 + .../russellhaering/goxmldsig/.travis.yml | 11 + .../github.com/russellhaering/goxmldsig/BUILD | 23 + .../russellhaering/goxmldsig/LICENSE | 175 + .../russellhaering/goxmldsig/README.md | 103 + .../russellhaering/goxmldsig/canonicalize.go | 271 + .../russellhaering/goxmldsig/clock.go | 55 + .../russellhaering/goxmldsig/etreeutils/BUILD | 15 + .../goxmldsig/etreeutils/canonicalize.go | 110 + .../goxmldsig/etreeutils/namespace.go | 428 + .../goxmldsig/etreeutils/sort.go | 83 + .../goxmldsig/etreeutils/unmarshal.go | 43 + .../russellhaering/goxmldsig/keystore.go | 67 + .../russellhaering/goxmldsig/run_test.sh | 12 + .../russellhaering/goxmldsig/sign.go | 334 + .../russellhaering/goxmldsig/tls_keystore.go | 39 + .../russellhaering/goxmldsig/types/BUILD | 10 + .../goxmldsig/types/signature.go | 93 + .../russellhaering/goxmldsig/validate.go | 484 + .../russellhaering/goxmldsig/xml_constants.go | 123 + .../github.com/shopspring/decimal/.gitignore | 6 + .../github.com/shopspring/decimal/.travis.yml | 13 + vendor/github.com/shopspring/decimal/BUILD | 13 + .../shopspring/decimal/CHANGELOG.md | 19 + vendor/github.com/shopspring/decimal/LICENSE | 45 + .../github.com/shopspring/decimal/README.md | 130 + .../shopspring/decimal/decimal-go.go | 415 + .../github.com/shopspring/decimal/decimal.go | 1477 + .../github.com/shopspring/decimal/rounding.go | 119 + vendor/github.com/spf13/cast/.gitignore | 25 + vendor/github.com/spf13/cast/BUILD | 13 + vendor/github.com/spf13/cast/LICENSE | 21 + vendor/github.com/spf13/cast/Makefile | 40 + vendor/github.com/spf13/cast/README.md | 75 + vendor/github.com/spf13/cast/cast.go | 176 + vendor/github.com/spf13/cast/caste.go | 1337 + .../spf13/cast/timeformattype_string.go | 27 + vendor/github.com/spf13/cobra/.gitignore | 39 + vendor/github.com/spf13/cobra/.golangci.yml | 62 + vendor/github.com/spf13/cobra/.mailmap | 3 + vendor/github.com/spf13/cobra/BUILD | 32 + vendor/github.com/spf13/cobra/CONDUCT.md | 37 + vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 + vendor/github.com/spf13/cobra/LICENSE.txt | 174 + vendor/github.com/spf13/cobra/MAINTAINERS | 13 + vendor/github.com/spf13/cobra/Makefile | 35 + vendor/github.com/spf13/cobra/README.md | 112 + vendor/github.com/spf13/cobra/active_help.go | 63 + vendor/github.com/spf13/cobra/active_help.md | 157 + vendor/github.com/spf13/cobra/args.go | 131 + .../spf13/cobra/bash_completions.go | 712 + .../spf13/cobra/bash_completions.md | 93 + .../spf13/cobra/bash_completionsV2.go | 396 + vendor/github.com/spf13/cobra/cobra.go | 239 + vendor/github.com/spf13/cobra/command.go | 1834 + .../github.com/spf13/cobra/command_notwin.go | 20 + 
vendor/github.com/spf13/cobra/command_win.go | 41 + vendor/github.com/spf13/cobra/completions.go | 878 + .../spf13/cobra/fish_completions.go | 292 + .../spf13/cobra/fish_completions.md | 4 + vendor/github.com/spf13/cobra/flag_groups.go | 224 + .../spf13/cobra/powershell_completions.go | 325 + .../spf13/cobra/powershell_completions.md | 3 + .../spf13/cobra/projects_using_cobra.md | 64 + .../spf13/cobra/shell_completions.go | 98 + .../spf13/cobra/shell_completions.md | 576 + vendor/github.com/spf13/cobra/user_guide.md | 726 + .../github.com/spf13/cobra/zsh_completions.go | 308 + .../github.com/spf13/cobra/zsh_completions.md | 48 + vendor/github.com/zclconf/go-cty/LICENSE | 21 + vendor/github.com/zclconf/go-cty/cty/BUILD | 44 + .../github.com/zclconf/go-cty/cty/capsule.go | 128 + .../zclconf/go-cty/cty/capsule_ops.go | 132 + .../zclconf/go-cty/cty/collection.go | 34 + .../zclconf/go-cty/cty/convert/BUILD | 24 + .../go-cty/cty/convert/compare_types.go | 165 + .../zclconf/go-cty/cty/convert/conversion.go | 190 + .../go-cty/cty/convert/conversion_capsule.go | 31 + .../cty/convert/conversion_collection.go | 569 + .../go-cty/cty/convert/conversion_dynamic.go | 33 + .../go-cty/cty/convert/conversion_object.go | 95 + .../cty/convert/conversion_primitive.go | 57 + .../go-cty/cty/convert/conversion_tuple.go | 71 + .../zclconf/go-cty/cty/convert/doc.go | 15 + .../go-cty/cty/convert/mismatch_msg.go | 226 + .../zclconf/go-cty/cty/convert/public.go | 83 + .../zclconf/go-cty/cty/convert/sort_types.go | 69 + .../zclconf/go-cty/cty/convert/unify.go | 357 + vendor/github.com/zclconf/go-cty/cty/doc.go | 18 + .../zclconf/go-cty/cty/element_iterator.go | 194 + vendor/github.com/zclconf/go-cty/cty/error.go | 55 + .../zclconf/go-cty/cty/function/BUILD | 16 + .../zclconf/go-cty/cty/function/argument.go | 70 + .../zclconf/go-cty/cty/function/doc.go | 6 + .../zclconf/go-cty/cty/function/error.go | 50 + .../zclconf/go-cty/cty/function/function.go | 346 + .../zclconf/go-cty/cty/function/stdlib/BUILD | 35 + .../go-cty/cty/function/stdlib/bool.go | 78 + .../go-cty/cty/function/stdlib/bytes.go | 112 + .../go-cty/cty/function/stdlib/collection.go | 1339 + .../go-cty/cty/function/stdlib/conversion.go | 87 + .../zclconf/go-cty/cty/function/stdlib/csv.go | 93 + .../go-cty/cty/function/stdlib/datetime.go | 434 + .../zclconf/go-cty/cty/function/stdlib/doc.go | 13 + .../go-cty/cty/function/stdlib/format.go | 517 + .../go-cty/cty/function/stdlib/format_fsm.go | 374 + .../go-cty/cty/function/stdlib/format_fsm.rl | 198 + .../go-cty/cty/function/stdlib/general.go | 107 + .../go-cty/cty/function/stdlib/json.go | 77 + .../go-cty/cty/function/stdlib/number.go | 653 + .../go-cty/cty/function/stdlib/regexp.go | 233 + .../go-cty/cty/function/stdlib/sequence.go | 218 + .../zclconf/go-cty/cty/function/stdlib/set.go | 222 + .../go-cty/cty/function/stdlib/string.go | 546 + .../cty/function/stdlib/string_replace.go | 80 + .../go-cty/cty/function/unpredictable.go | 31 + vendor/github.com/zclconf/go-cty/cty/gob.go | 204 + .../github.com/zclconf/go-cty/cty/gocty/BUILD | 20 + .../zclconf/go-cty/cty/gocty/doc.go | 7 + .../zclconf/go-cty/cty/gocty/helpers.go | 43 + .../github.com/zclconf/go-cty/cty/gocty/in.go | 548 + .../zclconf/go-cty/cty/gocty/out.go | 686 + .../zclconf/go-cty/cty/gocty/type_implied.go | 108 + .../github.com/zclconf/go-cty/cty/helper.go | 99 + vendor/github.com/zclconf/go-cty/cty/json.go | 199 + .../github.com/zclconf/go-cty/cty/json/BUILD | 21 + .../github.com/zclconf/go-cty/cty/json/doc.go | 11 + 
.../zclconf/go-cty/cty/json/marshal.go | 193 + .../zclconf/go-cty/cty/json/simple.go | 41 + .../zclconf/go-cty/cty/json/type.go | 23 + .../zclconf/go-cty/cty/json/type_implied.go | 170 + .../zclconf/go-cty/cty/json/unmarshal.go | 459 + .../zclconf/go-cty/cty/json/value.go | 65 + .../zclconf/go-cty/cty/list_type.go | 74 + .../github.com/zclconf/go-cty/cty/map_type.go | 74 + vendor/github.com/zclconf/go-cty/cty/marks.go | 368 + vendor/github.com/zclconf/go-cty/cty/null.go | 14 + .../zclconf/go-cty/cty/object_type.go | 220 + vendor/github.com/zclconf/go-cty/cty/path.go | 270 + .../github.com/zclconf/go-cty/cty/path_set.go | 204 + .../zclconf/go-cty/cty/primitive_type.go | 122 + .../github.com/zclconf/go-cty/cty/set/BUILD | 15 + .../github.com/zclconf/go-cty/cty/set/gob.go | 76 + .../zclconf/go-cty/cty/set/iterator.go | 15 + .../github.com/zclconf/go-cty/cty/set/ops.go | 210 + .../zclconf/go-cty/cty/set/rules.go | 47 + .../github.com/zclconf/go-cty/cty/set/set.go | 62 + .../zclconf/go-cty/cty/set_helper.go | 132 + .../zclconf/go-cty/cty/set_internals.go | 255 + .../github.com/zclconf/go-cty/cty/set_type.go | 72 + .../zclconf/go-cty/cty/tuple_type.go | 121 + vendor/github.com/zclconf/go-cty/cty/type.go | 123 + .../zclconf/go-cty/cty/type_conform.go | 139 + .../zclconf/go-cty/cty/types_to_register.go | 57 + .../github.com/zclconf/go-cty/cty/unknown.go | 85 + .../zclconf/go-cty/cty/unknown_as_null.go | 64 + vendor/github.com/zclconf/go-cty/cty/value.go | 142 + .../zclconf/go-cty/cty/value_init.go | 319 + .../zclconf/go-cty/cty/value_ops.go | 1368 + vendor/github.com/zclconf/go-cty/cty/walk.go | 227 + vendor/go.etcd.io/etcd/api/v3/LICENSE | 202 + vendor/go.etcd.io/etcd/api/v3/authpb/BUILD | 13 + .../go.etcd.io/etcd/api/v3/authpb/auth.pb.go | 1158 + .../go.etcd.io/etcd/api/v3/authpb/auth.proto | 42 + .../go.etcd.io/etcd/api/v3/etcdserverpb/BUILD | 25 + .../etcd/api/v3/etcdserverpb/etcdserver.pb.go | 1002 + .../etcd/api/v3/etcdserverpb/etcdserver.proto | 34 + .../api/v3/etcdserverpb/raft_internal.pb.go | 2673 + .../api/v3/etcdserverpb/raft_internal.proto | 81 + .../v3/etcdserverpb/raft_internal_stringer.go | 183 + .../etcd/api/v3/etcdserverpb/rpc.pb.go | 25862 ++ .../etcd/api/v3/etcdserverpb/rpc.proto | 1199 + .../go.etcd.io/etcd/api/v3/membershippb/BUILD | 13 + .../etcd/api/v3/membershippb/membership.pb.go | 1454 + .../etcd/api/v3/membershippb/membership.proto | 43 + vendor/go.etcd.io/etcd/api/v3/mvccpb/BUILD | 13 + vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go | 798 + vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto | 49 + .../etcd/api/v3/v3rpc/rpctypes/BUILD | 18 + .../etcd/api/v3/v3rpc/rpctypes/doc.go | 16 + .../etcd/api/v3/v3rpc/rpctypes/error.go | 264 + .../etcd/api/v3/v3rpc/rpctypes/md.go | 22 + .../api/v3/v3rpc/rpctypes/metadatafields.go | 20 + vendor/go.etcd.io/etcd/api/v3/version/BUILD | 10 + .../go.etcd.io/etcd/api/v3/version/version.go | 56 + vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE | 202 + .../etcd/client/pkg/v3/fileutil/BUILD | 48 + .../etcd/client/pkg/v3/fileutil/dir_unix.go | 28 + .../client/pkg/v3/fileutil/dir_windows.go | 52 + .../etcd/client/pkg/v3/fileutil/doc.go | 16 + .../etcd/client/pkg/v3/fileutil/filereader.go | 60 + .../etcd/client/pkg/v3/fileutil/fileutil.go | 168 + .../etcd/client/pkg/v3/fileutil/lock.go | 26 + .../etcd/client/pkg/v3/fileutil/lock_flock.go | 50 + .../etcd/client/pkg/v3/fileutil/lock_linux.go | 93 + .../etcd/client/pkg/v3/fileutil/lock_plan9.go | 45 + .../client/pkg/v3/fileutil/lock_solaris.go | 63 + .../etcd/client/pkg/v3/fileutil/lock_unix.go | 30 + 
.../client/pkg/v3/fileutil/lock_windows.go | 126 + .../client/pkg/v3/fileutil/preallocate.go | 54 + .../pkg/v3/fileutil/preallocate_darwin.go | 67 + .../pkg/v3/fileutil/preallocate_unix.go | 50 + .../v3/fileutil/preallocate_unsupported.go | 26 + .../etcd/client/pkg/v3/fileutil/purge.go | 101 + .../etcd/client/pkg/v3/fileutil/read_dir.go | 70 + .../etcd/client/pkg/v3/fileutil/sync.go | 30 + .../client/pkg/v3/fileutil/sync_darwin.go | 39 + .../etcd/client/pkg/v3/fileutil/sync_linux.go | 35 + .../etcd/client/pkg/v3/logutil/BUILD | 72 + .../etcd/client/pkg/v3/logutil/doc.go | 16 + .../etcd/client/pkg/v3/logutil/log_level.go | 30 + .../etcd/client/pkg/v3/logutil/zap.go | 108 + .../etcd/client/pkg/v3/logutil/zap_journal.go | 93 + .../etcd/client/pkg/v3/systemd/BUILD | 12 + .../etcd/client/pkg/v3/systemd/doc.go | 16 + .../etcd/client/pkg/v3/systemd/journal.go | 29 + .../etcd/client/pkg/v3/tlsutil/BUILD | 14 + .../client/pkg/v3/tlsutil/cipher_suites.go | 56 + .../etcd/client/pkg/v3/tlsutil/doc.go | 16 + .../etcd/client/pkg/v3/tlsutil/tlsutil.go | 73 + .../etcd/client/pkg/v3/tlsutil/versions.go | 47 + .../etcd/client/pkg/v3/transport/BUILD | 73 + .../etcd/client/pkg/v3/transport/doc.go | 17 + .../pkg/v3/transport/keepalive_listener.go | 121 + .../client/pkg/v3/transport/limit_listen.go | 86 + .../etcd/client/pkg/v3/transport/listener.go | 594 + .../client/pkg/v3/transport/listener_opts.go | 76 + .../client/pkg/v3/transport/listener_tls.go | 272 + .../etcd/client/pkg/v3/transport/sockopt.go | 45 + .../pkg/v3/transport/sockopt_solaris.go | 35 + .../client/pkg/v3/transport/sockopt_unix.go | 36 + .../pkg/v3/transport/sockopt_windows.go | 19 + .../client/pkg/v3/transport/timeout_conn.go | 44 + .../client/pkg/v3/transport/timeout_dialer.go | 36 + .../pkg/v3/transport/timeout_listener.go | 45 + .../pkg/v3/transport/timeout_transport.go | 51 + .../etcd/client/pkg/v3/transport/tls.go | 49 + .../etcd/client/pkg/v3/transport/transport.go | 77 + .../client/pkg/v3/transport/unix_listener.go | 40 + .../go.etcd.io/etcd/client/pkg/v3/types/BUILD | 16 + .../etcd/client/pkg/v3/types/doc.go | 17 + .../go.etcd.io/etcd/client/pkg/v3/types/id.go | 39 + .../etcd/client/pkg/v3/types/set.go | 195 + .../etcd/client/pkg/v3/types/slice.go | 22 + .../etcd/client/pkg/v3/types/urls.go | 82 + .../etcd/client/pkg/v3/types/urlsmap.go | 107 + vendor/go.etcd.io/etcd/client/v3/BUILD | 52 + vendor/go.etcd.io/etcd/client/v3/LICENSE | 202 + vendor/go.etcd.io/etcd/client/v3/README.md | 92 + vendor/go.etcd.io/etcd/client/v3/auth.go | 236 + vendor/go.etcd.io/etcd/client/v3/client.go | 611 + vendor/go.etcd.io/etcd/client/v3/cluster.go | 141 + .../go.etcd.io/etcd/client/v3/compact_op.go | 51 + vendor/go.etcd.io/etcd/client/v3/compare.go | 140 + vendor/go.etcd.io/etcd/client/v3/config.go | 92 + .../etcd/client/v3/credentials/BUILD | 13 + .../etcd/client/v3/credentials/credentials.go | 131 + vendor/go.etcd.io/etcd/client/v3/ctx.go | 50 + vendor/go.etcd.io/etcd/client/v3/doc.go | 106 + .../etcd/client/v3/internal/endpoint/BUILD | 12 + .../client/v3/internal/endpoint/endpoint.go | 138 + .../etcd/client/v3/internal/resolver/BUILD | 18 + .../client/v3/internal/resolver/resolver.go | 74 + vendor/go.etcd.io/etcd/client/v3/kv.go | 177 + vendor/go.etcd.io/etcd/client/v3/lease.go | 605 + vendor/go.etcd.io/etcd/client/v3/logger.go | 59 + .../go.etcd.io/etcd/client/v3/maintenance.go | 255 + .../go.etcd.io/etcd/client/v3/namespace/BUILD | 20 + .../etcd/client/v3/namespace/doc.go | 42 + .../go.etcd.io/etcd/client/v3/namespace/kv.go | 206 + 
.../etcd/client/v3/namespace/lease.go | 57 + .../etcd/client/v3/namespace/util.go | 42 + .../etcd/client/v3/namespace/watch.go | 83 + vendor/go.etcd.io/etcd/client/v3/op.go | 583 + vendor/go.etcd.io/etcd/client/v3/options.go | 69 + vendor/go.etcd.io/etcd/client/v3/retry.go | 306 + .../etcd/client/v3/retry_interceptor.go | 433 + vendor/go.etcd.io/etcd/client/v3/sort.go | 37 + vendor/go.etcd.io/etcd/client/v3/txn.go | 150 + vendor/go.etcd.io/etcd/client/v3/utils.go | 31 + vendor/go.etcd.io/etcd/client/v3/watch.go | 1042 + vendor/go.uber.org/zap/zapgrpc/BUILD | 13 + vendor/go.uber.org/zap/zapgrpc/zapgrpc.go | 241 + vendor/golang.org/x/crypto/bcrypt/BUILD | 13 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 304 + vendor/golang.org/x/crypto/blowfish/BUILD | 13 + vendor/golang.org/x/crypto/blowfish/block.go | 159 + vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + vendor/golang.org/x/crypto/md4/BUILD | 12 + vendor/golang.org/x/crypto/md4/md4.go | 122 + vendor/golang.org/x/crypto/md4/md4block.go | 91 + vendor/golang.org/x/crypto/scrypt/BUILD | 10 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 + .../x/mod/internal/lazyregexp/BUILD | 12 + .../x/mod/internal/lazyregexp/lazyre.go | 78 + vendor/golang.org/x/mod/modfile/BUILD | 19 + vendor/golang.org/x/mod/modfile/print.go | 174 + vendor/golang.org/x/mod/modfile/read.go | 958 + vendor/golang.org/x/mod/modfile/rule.go | 1559 + vendor/golang.org/x/mod/modfile/work.go | 234 + vendor/golang.org/x/mod/module/BUILD | 16 + vendor/golang.org/x/mod/module/module.go | 841 + vendor/golang.org/x/mod/module/pseudo.go | 250 + vendor/golang.org/x/net/html/atom/atom.go | 78 + vendor/golang.org/x/net/html/atom/table.go | 783 + vendor/golang.org/x/net/html/const.go | 111 + vendor/golang.org/x/net/html/doc.go | 127 + vendor/golang.org/x/net/html/doctype.go | 156 + vendor/golang.org/x/net/html/entity.go | 2253 + vendor/golang.org/x/net/html/escape.go | 339 + vendor/golang.org/x/net/html/foreign.go | 222 + vendor/golang.org/x/net/html/node.go | 225 + vendor/golang.org/x/net/html/parse.go | 2460 + vendor/golang.org/x/net/html/render.go | 293 + vendor/golang.org/x/net/html/token.go | 1268 + .../x/oauth2/bitbucket/bitbucket.go | 16 + vendor/golang.org/x/oauth2/github/github.go | 16 + .../x/tools/go/types/objectpath/objectpath.go | 68 +- .../x/tools/internal/gocommand/invoke.go | 128 +- .../x/tools/internal/gocommand/version.go | 18 +- .../api/admin/directory/v1/admin-api.json | 8403 + .../api/admin/directory/v1/admin-gen.go | 26141 ++ .../api/internal/gensupport/send.go | 21 + .../google.golang.org/api/internal/version.go | 2 +- .../api/storage/v1/storage-api.json | 42 +- .../api/storage/v1/storage-gen.go | 95 +- .../grpc/resolver/manual/manual.go | 105 + vendor/modules.txt | 260 +-
 1208 files changed, 594159 insertions(+), 274 deletions(-)
 create mode 100644 vendor/ariga.io/atlas/LICENSE create mode 100644 vendor/ariga.io/atlas/schemahcl/BUILD create mode 100644 vendor/ariga.io/atlas/schemahcl/context.go create mode 100644 vendor/ariga.io/atlas/schemahcl/extension.go create mode 100644 vendor/ariga.io/atlas/schemahcl/hcl.go create mode 100644 vendor/ariga.io/atlas/schemahcl/opts.go create mode 100644 vendor/ariga.io/atlas/schemahcl/spec.go create mode 100644 vendor/ariga.io/atlas/schemahcl/stdlib.go create mode 100644 vendor/ariga.io/atlas/schemahcl/types.go create mode 100644 vendor/ariga.io/atlas/sql/internal/specutil/BUILD create mode 100644 vendor/ariga.io/atlas/sql/internal/specutil/convert.go
 create mode 100644 vendor/ariga.io/atlas/sql/internal/specutil/spec.go create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/BUILD create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/dev.go create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/diff.go create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/exclude.go create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/plan.go create mode 100644 vendor/ariga.io/atlas/sql/internal/sqlx/sqlx.go create mode 100644 vendor/ariga.io/atlas/sql/migrate/BUILD create mode 100644 vendor/ariga.io/atlas/sql/migrate/dir.go create mode 100644 vendor/ariga.io/atlas/sql/migrate/lex.go create mode 100644 vendor/ariga.io/atlas/sql/migrate/migrate.go create mode 100644 vendor/ariga.io/atlas/sql/migrate/testdata/migrate/atlas.sum create mode 100644 vendor/ariga.io/atlas/sql/mysql/BUILD create mode 100644 vendor/ariga.io/atlas/sql/mysql/convert.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/diff.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/driver.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/inspect.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/BUILD create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/.README.md create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate.maria create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset.maria create mode 100644 vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/mysqlversion.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/migrate.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/sqlspec.go create mode 100644 vendor/ariga.io/atlas/sql/mysql/tidb.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/BUILD create mode 100644 vendor/ariga.io/atlas/sql/postgres/convert.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/crdb.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/diff.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/driver.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/inspect.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/internal/postgresop/BUILD create mode 100644 vendor/ariga.io/atlas/sql/postgres/internal/postgresop/postgresop.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/migrate.go create mode 100644 vendor/ariga.io/atlas/sql/postgres/sqlspec.go create mode 100644 vendor/ariga.io/atlas/sql/schema/BUILD create mode 100644 vendor/ariga.io/atlas/sql/schema/changekind_string.go create mode 100644 vendor/ariga.io/atlas/sql/schema/dsl.go create mode 100644 vendor/ariga.io/atlas/sql/schema/inspect.go create mode 100644 vendor/ariga.io/atlas/sql/schema/migrate.go create mode 100644 vendor/ariga.io/atlas/sql/schema/schema.go create mode 100644 vendor/ariga.io/atlas/sql/sqlclient/BUILD create mode 100644 vendor/ariga.io/atlas/sql/sqlclient/client.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/BUILD create mode 100644 vendor/ariga.io/atlas/sql/sqlite/convert.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/diff.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/driver.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/inspect.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/migrate.go create mode 100644 vendor/ariga.io/atlas/sql/sqlite/sqlspec.go
 create mode 100644 vendor/ariga.io/atlas/sql/sqlspec/BUILD create mode 100644 vendor/ariga.io/atlas/sql/sqlspec/sqlspec.go create mode 100644 vendor/ariga.io/atlas/sql/sqltool/BUILD create mode 100644 vendor/ariga.io/atlas/sql/sqltool/doc.go create mode 100644 vendor/ariga.io/atlas/sql/sqltool/hidden.go create mode 100644 vendor/ariga.io/atlas/sql/sqltool/hidden_windows.go create mode 100644 vendor/ariga.io/atlas/sql/sqltool/tool.go create mode 100644 vendor/entgo.io/ent/.all-contributorsrc create mode 100644 vendor/entgo.io/ent/.golangci.yml create mode 100644 vendor/entgo.io/ent/BUILD create mode 100644 vendor/entgo.io/ent/CODE_OF_CONDUCT.md create mode 100644 vendor/entgo.io/ent/CONTRIBUTING.md create mode 100644 vendor/entgo.io/ent/LICENSE create mode 100644 vendor/entgo.io/ent/README.md create mode 100644 vendor/entgo.io/ent/README_jp.md create mode 100644 vendor/entgo.io/ent/README_kr.md create mode 100644 vendor/entgo.io/ent/README_zh.md create mode 100644 vendor/entgo.io/ent/dialect/BUILD create mode 100644 vendor/entgo.io/ent/dialect/dialect.go create mode 100644 vendor/entgo.io/ent/dialect/entsql/BUILD create mode 100644 vendor/entgo.io/ent/dialect/entsql/annotation.go create mode 100644 vendor/entgo.io/ent/dialect/sql/BUILD create mode 100644 vendor/entgo.io/ent/dialect/sql/builder.go create mode 100644 vendor/entgo.io/ent/dialect/sql/driver.go create mode 100644 vendor/entgo.io/ent/dialect/sql/scan.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/BUILD create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/atlas.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/inspect.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/migrate.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/mysql.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/postgres.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/schema.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/sqlite.go create mode 100644 vendor/entgo.io/ent/dialect/sql/schema/writer.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sql.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sqlgraph/BUILD create mode 100644 vendor/entgo.io/ent/dialect/sql/sqlgraph/entql.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sqlgraph/errors.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sqlgraph/graph.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sqljson/BUILD create mode 100644 vendor/entgo.io/ent/dialect/sql/sqljson/dialect.go create mode 100644 vendor/entgo.io/ent/dialect/sql/sqljson/sqljson.go create mode 100644 vendor/entgo.io/ent/ent.go create mode 100644 vendor/entgo.io/ent/entql/BUILD create mode 100644 vendor/entgo.io/ent/entql/entql.go create mode 100644 vendor/entgo.io/ent/entql/types.go create mode 100644 vendor/entgo.io/ent/op_string.go create mode 100644 vendor/entgo.io/ent/schema/BUILD create mode 100644 vendor/entgo.io/ent/schema/edge/BUILD create mode 100644 vendor/entgo.io/ent/schema/edge/annotation.go create mode 100644 vendor/entgo.io/ent/schema/edge/edge.go create mode 100644 vendor/entgo.io/ent/schema/field/BUILD create mode 100644 vendor/entgo.io/ent/schema/field/annotation.go create mode 100644 vendor/entgo.io/ent/schema/field/field.go create mode 100644 vendor/entgo.io/ent/schema/field/numeric.go create mode 100644 vendor/entgo.io/ent/schema/field/type.go create mode 100644 vendor/entgo.io/ent/schema/index/BUILD create mode 100644 vendor/entgo.io/ent/schema/index/index.go create
mode 100644 vendor/entgo.io/ent/schema/schema.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/.gitignore create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/BUILD create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/LICENSE create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/Makefile create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/README.md create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/check.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/check_listener.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/check_task.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/BUILD create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/custom.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/dns.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/http.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/must.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/checks/ping.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/config.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/health.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/health_listener.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/http/BUILD create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/http/handler.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/options.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/types.go create mode 100644 vendor/github.com/AppsFlyer/go-sundheit/utils.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/.travis.yml create mode 100644 vendor/github.com/Azure/go-ntlmssp/BUILD create mode 100644 vendor/github.com/Azure/go-ntlmssp/LICENSE create mode 100644 vendor/github.com/Azure/go-ntlmssp/README.md create mode 100644 vendor/github.com/Azure/go-ntlmssp/SECURITY.md create mode 100644 vendor/github.com/Azure/go-ntlmssp/authenticate_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/authheader.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/avids.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/challenge_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/messageheader.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiate_message.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/negotiator.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/nlmp.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/unicode.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/varfield.go create mode 100644 vendor/github.com/Azure/go-ntlmssp/version.go create mode 100644 vendor/github.com/Masterminds/goutils/.travis.yml create mode 100644 vendor/github.com/Masterminds/goutils/BUILD create mode 100644 vendor/github.com/Masterminds/goutils/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/goutils/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/goutils/README.md create mode 100644 vendor/github.com/Masterminds/goutils/appveyor.yml create mode 100644 vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/randomstringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/stringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/wordutils.go create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml create mode 
100644 vendor/github.com/Masterminds/semver/BUILD create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/Makefile create mode 100644 vendor/github.com/Masterminds/semver/README.md create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml create mode 100644 vendor/github.com/Masterminds/semver/collection.go create mode 100644 vendor/github.com/Masterminds/semver/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/doc.go create mode 100644 vendor/github.com/Masterminds/semver/v3/.gitignore create mode 100644 vendor/github.com/Masterminds/semver/v3/.golangci.yml create mode 100644 vendor/github.com/Masterminds/semver/v3/BUILD create mode 100644 vendor/github.com/Masterminds/semver/v3/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/v3/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/v3/Makefile create mode 100644 vendor/github.com/Masterminds/semver/v3/README.md create mode 100644 vendor/github.com/Masterminds/semver/v3/collection.go create mode 100644 vendor/github.com/Masterminds/semver/v3/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/v3/doc.go create mode 100644 vendor/github.com/Masterminds/semver/v3/fuzz.go create mode 100644 vendor/github.com/Masterminds/semver/v3/version.go create mode 100644 vendor/github.com/Masterminds/semver/version.go create mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/.gitignore create mode 100644 vendor/github.com/Masterminds/sprig/v3/BUILD create mode 100644 vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/sprig/v3/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/sprig/v3/Makefile create mode 100644 vendor/github.com/Masterminds/sprig/v3/README.md create mode 100644 vendor/github.com/Masterminds/sprig/v3/crypto.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/date.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/defaults.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/dict.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/doc.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/functions.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/list.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/network.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/numeric.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/reflect.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/regex.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/semver.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/strings.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/url.go create mode 100644 vendor/github.com/agext/levenshtein/.gitignore create mode 100644 vendor/github.com/agext/levenshtein/.travis.yml create mode 100644 vendor/github.com/agext/levenshtein/BUILD create mode 100644 vendor/github.com/agext/levenshtein/DCO create mode 100644 vendor/github.com/agext/levenshtein/LICENSE create mode 100644 vendor/github.com/agext/levenshtein/MAINTAINERS create mode 100644 vendor/github.com/agext/levenshtein/NOTICE create mode 100644 vendor/github.com/agext/levenshtein/README.md create mode 100644 vendor/github.com/agext/levenshtein/levenshtein.go create mode 100644 
vendor/github.com/agext/levenshtein/params.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/LICENSE create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/BUILD create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/generate.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/tables.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/unicode2ragel.rb create mode 100644 vendor/github.com/apparentlymart/go-textseg/v13/textseg/utf8_seqs.go create mode 100644 vendor/github.com/beevik/etree/BUILD create mode 100644 vendor/github.com/beevik/etree/CONTRIBUTORS create mode 100644 vendor/github.com/beevik/etree/LICENSE create mode 100644 vendor/github.com/beevik/etree/README.md create mode 100644 vendor/github.com/beevik/etree/RELEASE_NOTES.md create mode 100644 vendor/github.com/beevik/etree/etree.go create mode 100644 vendor/github.com/beevik/etree/helpers.go create mode 100644 vendor/github.com/beevik/etree/path.go create mode 100644 vendor/github.com/coreos/go-semver/LICENSE create mode 100644 vendor/github.com/coreos/go-semver/NOTICE create mode 100644 vendor/github.com/coreos/go-semver/semver/BUILD create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/v22/NOTICE create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/BUILD create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go create mode 100644 vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go create mode 100644 vendor/github.com/dexidp/dex/LICENSE create mode 100644 vendor/github.com/dexidp/dex/api/v2/BUILD create mode 100644 vendor/github.com/dexidp/dex/api/v2/LICENSE create mode 100644 vendor/github.com/dexidp/dex/api/v2/api.pb.go create mode 100644 vendor/github.com/dexidp/dex/api/v2/api.proto create mode 100644 vendor/github.com/dexidp/dex/api/v2/api_grpc.pb.go create mode 100644 vendor/github.com/dexidp/dex/cmd/dex/BUILD create mode 100644 vendor/github.com/dexidp/dex/cmd/dex/config.go create mode 100644 vendor/github.com/dexidp/dex/cmd/dex/main.go create mode 100644 vendor/github.com/dexidp/dex/cmd/dex/serve.go create mode 100644 vendor/github.com/dexidp/dex/cmd/dex/version.go create mode 100644 vendor/github.com/dexidp/dex/connector/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/atlassiancrowd/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go create mode 100644 vendor/github.com/dexidp/dex/connector/authproxy/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go create mode 100644 vendor/github.com/dexidp/dex/connector/bitbucketcloud/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go 
create mode 100644 vendor/github.com/dexidp/dex/connector/connector.go create mode 100644 vendor/github.com/dexidp/dex/connector/gitea/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/gitea/gitea.go create mode 100644 vendor/github.com/dexidp/dex/connector/github/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/github/github.go create mode 100644 vendor/github.com/dexidp/dex/connector/gitlab/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go create mode 100644 vendor/github.com/dexidp/dex/connector/google/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/google/google.go create mode 100644 vendor/github.com/dexidp/dex/connector/keystone/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/keystone/keystone.go create mode 100644 vendor/github.com/dexidp/dex/connector/ldap/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh create mode 100644 vendor/github.com/dexidp/dex/connector/ldap/ldap.go create mode 100644 vendor/github.com/dexidp/dex/connector/linkedin/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go create mode 100644 vendor/github.com/dexidp/dex/connector/microsoft/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go create mode 100644 vendor/github.com/dexidp/dex/connector/mock/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/mock/connectortest.go create mode 100644 vendor/github.com/dexidp/dex/connector/oauth/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/oauth/oauth.go create mode 100644 vendor/github.com/dexidp/dex/connector/oidc/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/oidc/oidc.go create mode 100644 vendor/github.com/dexidp/dex/connector/openshift/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/openshift/openshift.go create mode 100644 vendor/github.com/dexidp/dex/connector/saml/BUILD create mode 100644 vendor/github.com/dexidp/dex/connector/saml/saml.go create mode 100644 vendor/github.com/dexidp/dex/connector/saml/types.go create mode 100644 vendor/github.com/dexidp/dex/pkg/groups/BUILD create mode 100644 vendor/github.com/dexidp/dex/pkg/groups/groups.go create mode 100644 vendor/github.com/dexidp/dex/pkg/httpclient/BUILD create mode 100644 vendor/github.com/dexidp/dex/pkg/httpclient/httpclient.go create mode 100644 vendor/github.com/dexidp/dex/pkg/httpclient/readme.md create mode 100644 vendor/github.com/dexidp/dex/pkg/log/BUILD create mode 100644 vendor/github.com/dexidp/dex/pkg/log/deprecated.go create mode 100644 vendor/github.com/dexidp/dex/pkg/log/logger.go create mode 100644 vendor/github.com/dexidp/dex/server/BUILD create mode 100644 vendor/github.com/dexidp/dex/server/api.go create mode 100644 vendor/github.com/dexidp/dex/server/deviceflowhandlers.go create mode 100644 vendor/github.com/dexidp/dex/server/doc.go create mode 100644 vendor/github.com/dexidp/dex/server/handlers.go create mode 100644 vendor/github.com/dexidp/dex/server/internal/BUILD create mode 100644 vendor/github.com/dexidp/dex/server/internal/codec.go create mode 100644 vendor/github.com/dexidp/dex/server/internal/types.pb.go create mode 100644 vendor/github.com/dexidp/dex/server/internal/types.proto create mode 100644 vendor/github.com/dexidp/dex/server/oauth2.go create mode 100644 vendor/github.com/dexidp/dex/server/refreshhandlers.go create mode 100644 vendor/github.com/dexidp/dex/server/rotation.go create mode 100644 
vendor/github.com/dexidp/dex/server/server.go create mode 100644 vendor/github.com/dexidp/dex/server/templates.go create mode 100644 vendor/github.com/dexidp/dex/storage/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/doc.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/authcode.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/authrequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/client.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/connector.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/devicerequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/devicetoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/keys.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/main.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/offlinesession.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/password.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/refreshtoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/types.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/client/utils.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode/authcode.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authcode_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest/authrequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/authrequest_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/client.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector/connector.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/connector_update.go create mode 100644 
vendor/github.com/dexidp/dex/storage/ent/db/devicerequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/devicerequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/devicetoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/ent.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys/keys.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/keys_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/migrate/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/migrate/migrate.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/migrate/schema.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/mutation.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/oauth2client.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/offlinesession.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_delete.go create mode 
100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password/password.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/password_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/predicate/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/predicate/predicate.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/refreshtoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/where.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_create.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_delete.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_query.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_update.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/runtime.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/db/tx.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/generate.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/mysql.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/postgres.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/authcode.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/authrequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/client.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/connector.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/devicerequest.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/devicetoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/dialects.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/keys.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/offlinesession.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/password.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/schema/refreshtoken.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/sqlite.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/types.go create mode 100644 vendor/github.com/dexidp/dex/storage/ent/utils.go create mode 100644 vendor/github.com/dexidp/dex/storage/etcd/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/etcd/config.go create mode 100644 vendor/github.com/dexidp/dex/storage/etcd/etcd.go create mode 100644 vendor/github.com/dexidp/dex/storage/etcd/types.go create mode 100644 vendor/github.com/dexidp/dex/storage/health.go create mode 100644 
vendor/github.com/dexidp/dex/storage/kubernetes/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/client.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/doc.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/lock.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/storage.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/transport.go create mode 100644 vendor/github.com/dexidp/dex/storage/kubernetes/types.go create mode 100644 vendor/github.com/dexidp/dex/storage/memory/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/memory/memory.go create mode 100644 vendor/github.com/dexidp/dex/storage/sql/BUILD create mode 100644 vendor/github.com/dexidp/dex/storage/sql/config.go create mode 100644 vendor/github.com/dexidp/dex/storage/sql/crud.go create mode 100644 vendor/github.com/dexidp/dex/storage/sql/migrate.go create mode 100644 vendor/github.com/dexidp/dex/storage/sql/sql.go create mode 100644 vendor/github.com/dexidp/dex/storage/sql/sqlite.go create mode 100644 vendor/github.com/dexidp/dex/storage/static.go create mode 100644 vendor/github.com/dexidp/dex/storage/storage.go create mode 100644 vendor/github.com/dexidp/dex/web/BUILD create mode 100644 vendor/github.com/dexidp/dex/web/robots.txt create mode 100644 vendor/github.com/dexidp/dex/web/static/img/atlassian-crowd-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/bitbucket-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/email-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/gitea-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/github-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/gitlab-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/google-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/keystone-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/ldap-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/linkedin-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/microsoft-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/oidc-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/img/saml-icon.svg create mode 100644 vendor/github.com/dexidp/dex/web/static/main.css create mode 100644 vendor/github.com/dexidp/dex/web/templates/approval.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/device.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/device_success.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/error.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/footer.html create mode 100644 
vendor/github.com/dexidp/dex/web/templates/header.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/login.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/oob.html create mode 100644 vendor/github.com/dexidp/dex/web/templates/password.html create mode 100644 vendor/github.com/dexidp/dex/web/themes/dark/favicon.png create mode 100644 vendor/github.com/dexidp/dex/web/themes/dark/logo.png create mode 100644 vendor/github.com/dexidp/dex/web/themes/dark/styles.css create mode 100644 vendor/github.com/dexidp/dex/web/themes/light/favicon.png create mode 100644 vendor/github.com/dexidp/dex/web/themes/light/logo.png create mode 100644 vendor/github.com/dexidp/dex/web/themes/light/styles.css create mode 100644 vendor/github.com/dexidp/dex/web/web.go create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore create mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml create mode 100644 vendor/github.com/felixge/httpsnoop/BUILD create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile create mode 100644 vendor/github.com/felixge/httpsnoop/README.md create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go create mode 100644 vendor/github.com/ghodss/yaml/.gitignore create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml create mode 100644 vendor/github.com/ghodss/yaml/BUILD create mode 100644 vendor/github.com/ghodss/yaml/LICENSE create mode 100644 vendor/github.com/ghodss/yaml/README.md create mode 100644 vendor/github.com/ghodss/yaml/fields.go create mode 100644 vendor/github.com/ghodss/yaml/yaml.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/BUILD create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/LICENSE create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/README.md create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/ber.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/content_int.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/header.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/identifier.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/length.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/real.go create mode 100644 vendor/github.com/go-asn1-ber/asn1-ber/util.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/.gitignore create mode 100644 vendor/github.com/go-jose/go-jose/v3/.golangci.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/.travis.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/BUILD create mode 100644 vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v3/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/asymmetric.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/BUILD create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go create mode 
100644 vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/crypter.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/encoding.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/BUILD create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/decode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/encode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/indent.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/scanner.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/stream.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/tags.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwe.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwk.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jws.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/opaque.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/shared.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/signing.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/symmetric.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/BUILD create mode 100644 vendor/github.com/go-ldap/ldap/v3/LICENSE create mode 100644 vendor/github.com/go-ldap/ldap/v3/add.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/bind.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/client.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/compare.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/conn.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/control.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/debug.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/del.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/dn.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/doc.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/error.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/filter.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/ldap.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/moddn.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/modify.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/passwdmodify.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/request.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/search.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/unbind.go create mode 100644 vendor/github.com/go-ldap/ldap/v3/whoami.go create mode 100644 vendor/github.com/go-openapi/inflect/.hgignore create mode 100644 vendor/github.com/go-openapi/inflect/BUILD create mode 100644 vendor/github.com/go-openapi/inflect/LICENCE create mode 100644 vendor/github.com/go-openapi/inflect/README create mode 100644 vendor/github.com/go-openapi/inflect/inflect.go create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS create mode 100644 vendor/github.com/go-sql-driver/mysql/BUILD create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md create mode 100644 vendor/github.com/go-sql-driver/mysql/atomic_bool.go create mode 100644 
vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go create mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go create mode 100644 vendor/github.com/go-sql-driver/mysql/fuzz.go create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime.go create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go create mode 100644 vendor/github.com/gorilla/handlers/BUILD create mode 100644 vendor/github.com/gorilla/handlers/LICENSE create mode 100644 vendor/github.com/gorilla/handlers/README.md create mode 100644 vendor/github.com/gorilla/handlers/canonical.go create mode 100644 vendor/github.com/gorilla/handlers/compress.go create mode 100644 vendor/github.com/gorilla/handlers/cors.go create mode 100644 vendor/github.com/gorilla/handlers/doc.go create mode 100644 vendor/github.com/gorilla/handlers/handlers.go create mode 100644 vendor/github.com/gorilla/handlers/logging.go create mode 100644 vendor/github.com/gorilla/handlers/proxy_headers.go create mode 100644 vendor/github.com/gorilla/handlers/recovery.go create mode 100644 vendor/github.com/gorilla/mux/AUTHORS create mode 100644 vendor/github.com/gorilla/mux/BUILD create mode 100644 vendor/github.com/gorilla/mux/LICENSE create mode 100644 vendor/github.com/gorilla/mux/README.md create mode 100644 vendor/github.com/gorilla/mux/doc.go create mode 100644 vendor/github.com/gorilla/mux/middleware.go create mode 100644 vendor/github.com/gorilla/mux/mux.go create mode 100644 vendor/github.com/gorilla/mux/regexp.go create mode 100644 vendor/github.com/gorilla/mux/route.go create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/v2/README.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/diagnostic.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/doc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/eval_context.go create mode 100644 
vendor/github.com/hashicorp/hcl/v2/expr_call.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/expr_list.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/expr_map.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/gohcl/types.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclparse/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb 
create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode_derived.rl create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/doc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/format.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/BUILD create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/doc.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/is.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/navigation.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/peeker.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/public.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/spec.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/structure.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/merged.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/ops.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/pos.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/pos_scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/schema.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/spec.md create mode 100644 vendor/github.com/hashicorp/hcl/v2/static_expr.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/structure.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/traversal.go create mode 100644 vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go create mode 100644 vendor/github.com/huandu/xstrings/.gitignore create mode 100644 vendor/github.com/huandu/xstrings/BUILD create mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md create mode 100644 vendor/github.com/huandu/xstrings/LICENSE create mode 100644 vendor/github.com/huandu/xstrings/README.md create mode 100644 vendor/github.com/huandu/xstrings/common.go create mode 100644 vendor/github.com/huandu/xstrings/convert.go create mode 100644 vendor/github.com/huandu/xstrings/count.go create mode 100644 vendor/github.com/huandu/xstrings/doc.go create mode 100644 vendor/github.com/huandu/xstrings/format.go create mode 
100644 vendor/github.com/huandu/xstrings/manipulate.go
create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder.go
create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder_go110.go
create mode 100644 vendor/github.com/huandu/xstrings/translate.go
create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml
create mode 100644 vendor/github.com/imdario/mergo/.gitignore
create mode 100644 vendor/github.com/imdario/mergo/.travis.yml
create mode 100644 vendor/github.com/imdario/mergo/BUILD
create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/imdario/mergo/LICENSE
create mode 100644 vendor/github.com/imdario/mergo/README.md
create mode 100644 vendor/github.com/imdario/mergo/doc.go
create mode 100644 vendor/github.com/imdario/mergo/map.go
create mode 100644 vendor/github.com/imdario/mergo/merge.go
create mode 100644 vendor/github.com/imdario/mergo/mergo.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/BUILD
create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
create mode 100644 vendor/github.com/mattermost/xml-roundtrip-validator/BUILD
create mode 100644 vendor/github.com/mattermost/xml-roundtrip-validator/LICENSE.txt
create mode 100644 vendor/github.com/mattermost/xml-roundtrip-validator/README.md
create mode 100644 vendor/github.com/mattermost/xml-roundtrip-validator/SECURITY.md
create mode 100644 vendor/github.com/mattermost/xml-roundtrip-validator/validator.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/.codecov.yml
create mode 100644 vendor/github.com/mattn/go-sqlite3/.gitignore
create mode 100644 vendor/github.com/mattn/go-sqlite3/BUILD
create mode 100644 vendor/github.com/mattn/go-sqlite3/LICENSE
create mode 100644 vendor/github.com/mattn/go-sqlite3/README.md
create mode 100644 vendor/github.com/mattn/go-sqlite3/backup.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/callback.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/convert.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/doc.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/error.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_context.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_os_trace.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_type.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
create mode 100644 vendor/github.com/mattn/go-sqlite3/static_mock.go
create mode 100644 vendor/github.com/mitchellh/copystructure/.travis.yml
create mode 100644 vendor/github.com/mitchellh/copystructure/BUILD
create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE
create mode 100644 vendor/github.com/mitchellh/copystructure/README.md
create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go
create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go
create mode 100644 vendor/github.com/mitchellh/go-wordwrap/BUILD
create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md
create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml
create mode 100644 vendor/github.com/mitchellh/reflectwalk/BUILD
create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE
create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md
create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go
create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go
create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
create mode 100644 vendor/github.com/oklog/run/.gitignore
create mode 100644 vendor/github.com/oklog/run/BUILD
create mode 100644 vendor/github.com/oklog/run/LICENSE
create mode 100644 vendor/github.com/oklog/run/README.md
create mode 100644 vendor/github.com/oklog/run/actors.go
create mode 100644 vendor/github.com/oklog/run/group.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/BUILD
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/.gitignore
create mode 100644 vendor/github.com/russellhaering/goxmldsig/.travis.yml
create mode 100644 vendor/github.com/russellhaering/goxmldsig/BUILD
create mode 100644 vendor/github.com/russellhaering/goxmldsig/LICENSE
create mode 100644 vendor/github.com/russellhaering/goxmldsig/README.md
create mode 100644 vendor/github.com/russellhaering/goxmldsig/canonicalize.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/clock.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/etreeutils/BUILD
create mode 100644 vendor/github.com/russellhaering/goxmldsig/etreeutils/canonicalize.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/etreeutils/namespace.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/etreeutils/sort.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/etreeutils/unmarshal.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/keystore.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/run_test.sh
create mode 100644 vendor/github.com/russellhaering/goxmldsig/sign.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/tls_keystore.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/types/BUILD
create mode 100644 vendor/github.com/russellhaering/goxmldsig/types/signature.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/validate.go
create mode 100644 vendor/github.com/russellhaering/goxmldsig/xml_constants.go
create mode 100644 vendor/github.com/shopspring/decimal/.gitignore
create mode 100644 vendor/github.com/shopspring/decimal/.travis.yml
create mode 100644 vendor/github.com/shopspring/decimal/BUILD
create mode 100644 vendor/github.com/shopspring/decimal/CHANGELOG.md
create mode 100644 vendor/github.com/shopspring/decimal/LICENSE
create mode 100644 vendor/github.com/shopspring/decimal/README.md
create mode 100644 vendor/github.com/shopspring/decimal/decimal-go.go
create mode 100644 vendor/github.com/shopspring/decimal/decimal.go
create mode 100644 vendor/github.com/shopspring/decimal/rounding.go
create mode 100644 vendor/github.com/spf13/cast/.gitignore
create mode 100644 vendor/github.com/spf13/cast/BUILD
create mode 100644 vendor/github.com/spf13/cast/LICENSE
create mode 100644 vendor/github.com/spf13/cast/Makefile
create mode 100644 vendor/github.com/spf13/cast/README.md
create mode 100644 vendor/github.com/spf13/cast/cast.go
create mode 100644 vendor/github.com/spf13/cast/caste.go
create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go
create mode 100644 vendor/github.com/spf13/cobra/.gitignore
create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml
create mode 100644 vendor/github.com/spf13/cobra/.mailmap
create mode 100644 vendor/github.com/spf13/cobra/BUILD
create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md
create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md
create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
create mode 100644 vendor/github.com/spf13/cobra/MAINTAINERS
create mode 100644 vendor/github.com/spf13/cobra/Makefile
create mode 100644 vendor/github.com/spf13/cobra/README.md
create mode 100644 vendor/github.com/spf13/cobra/active_help.go
create mode 100644 vendor/github.com/spf13/cobra/active_help.md
create mode 100644 vendor/github.com/spf13/cobra/args.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra.go
create mode 100644 vendor/github.com/spf13/cobra/command.go
create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
create mode 100644 vendor/github.com/spf13/cobra/command_win.go
create mode 100644 vendor/github.com/spf13/cobra/completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
create mode 100644 vendor/github.com/spf13/cobra/flag_groups.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/user_guide.md
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
create mode 100644 vendor/github.com/zclconf/go-cty/LICENSE
create mode 100644 vendor/github.com/zclconf/go-cty/cty/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/capsule.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/capsule_ops.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/collection.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_capsule.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/public.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/unify.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/element_iterator.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/error.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/argument.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/error.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/function.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gob.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/in.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/out.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/helper.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/doc.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/marshal.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/simple.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/value.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/list_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/map_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/marks.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/null.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/object_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/path.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/path_set.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/primitive_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/BUILD
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/gob.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/iterator.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/ops.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/rules.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/set.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_helper.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_internals.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/tuple_type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/type.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/type_conform.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/types_to_register.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/value.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_init.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_ops.go
create mode 100644 vendor/github.com/zclconf/go-cty/cty/walk.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/LICENSE
create mode 100644 vendor/go.etcd.io/etcd/api/v3/authpb/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/membershippb/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/mvccpb/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
create mode 100644 vendor/go.etcd.io/etcd/api/v3/version/BUILD
create mode 100644 vendor/go.etcd.io/etcd/api/v3/version/version.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/systemd/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/v3/LICENSE
create mode 100644 vendor/go.etcd.io/etcd/client/v3/README.md
create mode 100644 vendor/go.etcd.io/etcd/client/v3/auth.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/client.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/cluster.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/compact_op.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/compare.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/config.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/credentials/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/ctx.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/endpoint/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/resolver/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/kv.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/lease.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/logger.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/maintenance.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/BUILD
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/doc.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/kv.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/lease.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/util.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/namespace/watch.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/op.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/options.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/retry.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/sort.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/txn.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/utils.go
create mode 100644 vendor/go.etcd.io/etcd/client/v3/watch.go
create mode 100644 vendor/go.uber.org/zap/zapgrpc/BUILD
create mode 100644 vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
create mode 100644 vendor/golang.org/x/crypto/bcrypt/BUILD
create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go
create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/BUILD
create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
create mode 100644 vendor/golang.org/x/crypto/md4/BUILD
create mode 100644 vendor/golang.org/x/crypto/md4/md4.go
create mode 100644 vendor/golang.org/x/crypto/md4/md4block.go
create mode 100644 vendor/golang.org/x/crypto/scrypt/BUILD
create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go
create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/BUILD
create mode 100644 vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go
create mode 100644 vendor/golang.org/x/mod/modfile/BUILD
create mode 100644 vendor/golang.org/x/mod/modfile/print.go
create mode 100644 vendor/golang.org/x/mod/modfile/read.go
create mode 100644 vendor/golang.org/x/mod/modfile/rule.go
create mode 100644 vendor/golang.org/x/mod/modfile/work.go
create mode 100644 vendor/golang.org/x/mod/module/BUILD
create mode 100644 vendor/golang.org/x/mod/module/module.go
create mode 100644 vendor/golang.org/x/mod/module/pseudo.go
create mode 100644 vendor/golang.org/x/net/html/atom/atom.go
create mode 100644 vendor/golang.org/x/net/html/atom/table.go
create mode 100644 vendor/golang.org/x/net/html/const.go
create mode 100644 vendor/golang.org/x/net/html/doc.go
create mode 100644 vendor/golang.org/x/net/html/doctype.go
create mode 100644 vendor/golang.org/x/net/html/entity.go
create mode 100644 vendor/golang.org/x/net/html/escape.go
create mode 100644 vendor/golang.org/x/net/html/foreign.go
create mode 100644 vendor/golang.org/x/net/html/node.go
create mode 100644 vendor/golang.org/x/net/html/parse.go
create mode 100644 vendor/golang.org/x/net/html/render.go
create mode 100644 vendor/golang.org/x/net/html/token.go
create mode 100644 vendor/golang.org/x/oauth2/bitbucket/bitbucket.go
create mode 100644 vendor/golang.org/x/oauth2/github/github.go
create mode 100644 vendor/google.golang.org/api/admin/directory/v1/admin-api.json
create mode 100644 vendor/google.golang.org/api/admin/directory/v1/admin-gen.go
create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go

diff --git a/deps.bzl b/deps.bzl
index 53052bd4..7e48b6ec 100644
--- a/deps.bzl
+++ b/deps.bzl
@@ -21,6 +21,13 @@ def go_dependencies():
         sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=",
         version = "v0.0.1-2020.1.4",
     )
+    go_repository(
+        name = "com_github_agext_levenshtein",
+        importpath = "github.com/agext/levenshtein",
+        sum = "h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=",
+        version = "v1.2.1",
+    )
+
     go_repository(
         name = "com_github_ajstarks_svgo",
         importpath = "github.com/ajstarks/svgo",
@@ -47,6 +54,13 @@ def go_dependencies():
         sum = "h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=",
         version = "v0.0.0-20211218093645-b94a6e3cc137",
     )
+    go_repository(
+        name = "com_github_alexbrainman_sspi",
+        importpath = "github.com/alexbrainman/sspi",
+        sum = "h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=",
+        version = "v0.0.0-20210105120005-909beea2cc74",
+    )
+
     go_repository(
         name = "com_github_antihax_optional",
         importpath = "github.com/antihax/optional",
@@ -65,6 +79,24 @@ def go_dependencies():
         sum = "h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=",
         version = "v0.16.0",
     )
+    go_repository(
+        name = "com_github_apparentlymart_go_dump",
+        importpath = "github.com/apparentlymart/go-dump",
+        sum = "h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=",
+        version = "v0.0.0-20180507223929-23540a00eaa3",
+    )
+    go_repository(
+        name = "com_github_apparentlymart_go_textseg_v13",
+        importpath = "github.com/apparentlymart/go-textseg/v13",
+        sum = "h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=",
+        version = "v13.0.0",
+    )
+    go_repository(
+        name = "com_github_appsflyer_go_sundheit",
+        importpath = "github.com/AppsFlyer/go-sundheit",
+        sum = "h1:/VxpyigCfJrq1r97mn9HPiAB2qrhcTFHwNIIDr15CZM=",
+        version = "v0.5.0",
+    )
 
     go_repository(
         name = "com_github_armon_go_socks5",
@@ -78,6 +110,12 @@ def go_dependencies():
         sum = "h1:FNW3Tb8vKvXLZ7lzGlg/dCAXhK4RC5fyFewD11oJhUM=",
         version = "v1.44.41",
     )
+    go_repository(
+        name = "com_github_azure_go_ntlmssp",
+        importpath = "github.com/Azure/go-ntlmssp",
+        sum = "h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=",
+        version = "v0.0.0-20221128193559-754e69321358",
+    )
 
     go_repository(
         name = "com_github_bahlo_generic_list_go",
@@ -97,6 +135,12 @@ def go_dependencies():
         sum = "h1:YWJ+hbwEOB/PtIFCRMDnvWVSpwPFFGEpdIB6E3bt8X4=",
         version = "v0.39.0",
     )
+    go_repository(
+        name = "com_github_beevik_etree",
+        importpath = "github.com/beevik/etree",
+        sum = "h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw=",
+        version = "v1.2.0",
+    )
 
     go_repository(
         name = "com_github_benbjohnson_clock",
@@ -249,11 +293,24 @@ def go_dependencies():
         sum = "h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=",
         version = "v0.0.0-20230607035331-e9ce68804cb4",
     )
+
     go_repository(
         name = "com_github_coreos_go_oidc_v3",
         importpath = "github.com/coreos/go-oidc/v3",
-        sum = "h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw=",
-        version = "v3.1.0",
+        sum = "h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=",
+        version = "v3.6.0",
+    )
+    go_repository(
+        name = "com_github_coreos_go_semver",
+        importpath = "github.com/coreos/go-semver",
+        sum = "h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=",
+        version = "v0.3.1",
+    )
+    go_repository(
+        name = "com_github_coreos_go_systemd_v22",
+        importpath = "github.com/coreos/go-systemd/v22",
+        sum = "h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=",
+        version = "v22.3.2",
     )
 
     go_repository(
@@ -274,6 +331,12 @@ def go_dependencies():
         sum = "h1:WR1qVJzbvrVywhAk4kMQKRPx09AZVI0NdEdYs59iHcA=",
         version = "v0.0.0-20160816171116-049aabb0122b",
     )
+    go_repository(
+        name = "com_github_data_dog_go_sqlmock",
+        importpath = "github.com/DATA-DOG/go-sqlmock",
+        sum = "h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=",
+        version = "v1.5.0",
+    )
 
     go_repository(
         name = "com_github_davecgh_go_spew",
@@ -281,6 +344,19 @@ def go_dependencies():
         sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
         version = "v1.1.1",
     )
+    go_repository(
+        name = "com_github_dexidp_dex",
+        importpath = "github.com/dexidp/dex",
+        sum = "h1:y86PLEHr+sm6OyWS37VGPcGBaj+u51lltfrLPCcg3KA=",
+        version = "v0.0.0-20230804184036-a9d1fd31c329",
+    )
+    go_repository(
+        name = "com_github_dexidp_dex_api_v2",
+        importpath = "github.com/dexidp/dex/api/v2",
+        sum = "h1:2xadZzg7cmAXrlTYRmBGXXsnLwr+vYYknW9IxzU3Vtg=",
+        version = "v2.1.1-0.20230804184036-a9d1fd31c329",
+    )
+
     go_repository(
         name = "com_github_dgryski_go_farm",
         importpath = "github.com/dgryski/go-farm",
@@ -337,6 +413,12 @@ def go_dependencies():
         sum = "h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=",
         version = "v1.13.0",
     )
+    go_repository(
+        name = "com_github_felixge_httpsnoop",
+        importpath = "github.com/felixge/httpsnoop",
+        sum = "h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=",
+        version = "v1.0.3",
+    )
 
     go_repository(
         name = "com_github_fergusstrange_embedded_postgres",
@@ -382,6 +464,12 @@ def go_dependencies():
         sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
         version = "v1.0.0",
     )
+    go_repository(
+        name = "com_github_go_asn1_ber_asn1_ber",
+        importpath = "github.com/go-asn1-ber/asn1-ber",
+        sum = "h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=",
+        version = "v1.5.4",
+    )
 
     go_repository(
         name = "com_github_go_errors_errors",
@@ -401,6 +489,13 @@ def go_dependencies():
         sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
         version = "v0.0.0-20200222043503-6f7a984d4dc4",
     )
+    go_repository(
+        name = "com_github_go_jose_go_jose_v3",
+        importpath = "github.com/go-jose/go-jose/v3",
+        sum = "h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=",
+        version = "v3.0.0",
+    )
+
     go_repository(
         name = "com_github_go_kit_kit",
         importpath = "github.com/go-kit/kit",
@@ -414,6 +509,13 @@ def go_dependencies():
         sum = "h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=",
         version = "v0.2.1",
     )
+    go_repository(
+        name = "com_github_go_ldap_ldap_v3",
+        importpath = "github.com/go-ldap/ldap/v3",
+        sum = "h1:ekEKmaDrpvR2yf5Nc/DClsGG9lAmdDixe44mLzlW5r8=",
+        version = "v3.4.5",
+    )
+
     go_repository(
         name = "com_github_go_logfmt_logfmt",
         importpath = "github.com/go-logfmt/logfmt",
@@ -433,6 +535,12 @@ def go_dependencies():
         sum = "h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=",
         version = "v1.2.2",
     )
+    go_repository(
+        name = "com_github_go_openapi_inflect",
+        importpath = "github.com/go-openapi/inflect",
+        sum = "h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=",
+        version = "v0.19.0",
+    )
 
     go_repository(
         name = "com_github_go_openapi_jsonpointer",
@@ -455,8 +563,8 @@ def go_dependencies():
     go_repository(
         name = "com_github_go_sql_driver_mysql",
         importpath = "github.com/go-sql-driver/mysql",
-        sum = "h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=",
-        version = "v1.6.0",
+        sum = "h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=",
+        version = "v1.7.1",
     )
 
     go_repository(
@@ -472,12 +580,26 @@ def go_dependencies():
         sum = "h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=",
         version = "v0.0.0-20230315185526-52ccab3ef572",
     )
+    go_repository(
+        name = "com_github_go_test_deep",
+        importpath = "github.com/go-test/deep",
+        sum = "h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=",
+        version = "v1.0.3",
+    )
+
     go_repository(
         name = "com_github_gocql_gocql",
         importpath = "github.com/gocql/gocql",
         sum = "h1:TZhsCd7fRuye4VyHr3WCvWwIQaZUmjsqnSIXK9FcVCE=",
         version = "v1.2.0",
     )
+    go_repository(
+        name = "com_github_godbus_dbus_v5",
+        importpath = "github.com/godbus/dbus/v5",
+        sum = "h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=",
+        version = "v5.0.4",
+    )
+
     go_repository(
         name = "com_github_gogo_gateway",
         importpath = "github.com/gogo/gateway",
@@ -574,6 +696,13 @@ def go_dependencies():
         sum = "h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=",
         version = "v0.5.9",
     )
+    go_repository(
+        name = "com_github_google_go_pkcs11",
+        importpath = "github.com/google/go-pkcs11",
+        sum = "h1:5meDPB26aJ98f+K9G21f0AqZwo/S5BJMJh8nuhMbdsI=",
+        version = "v0.2.0",
+    )
+
     go_repository(
         name = "com_github_google_gofuzz",
         importpath = "github.com/google/gofuzz",
@@ -635,14 +764,14 @@ def go_dependencies():
     go_repository(
         name = "com_github_googleapis_enterprise_certificate_proxy",
         importpath = "github.com/googleapis/enterprise-certificate-proxy",
-        sum = "h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=",
-        version = "v0.2.3",
+        sum = "h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=",
+        version = "v0.2.5",
     )
     go_repository(
         name = "com_github_googleapis_gax_go_v2",
         importpath = "github.com/googleapis/gax-go/v2",
-        sum = "h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=",
-        version = "v2.11.0",
+        sum = "h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=",
+        version = "v2.12.0",
     )
     go_repository(
         name = "com_github_googleapis_go_type_adapters",
@@ -657,6 +786,19 @@ def go_dependencies():
         sum = "h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=",
         version = "v1.17.2",
     )
+    go_repository(
+        name = "com_github_gorilla_handlers",
+        importpath = "github.com/gorilla/handlers",
+        sum = "h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=",
+        version = "v1.5.1",
+    )
+    go_repository(
+        name = "com_github_gorilla_mux",
+        importpath = "github.com/gorilla/mux",
+        sum = "h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=",
+        version = "v1.8.0",
+    )
+
     go_repository(
         name = "com_github_gorilla_securecookie",
         importpath = "github.com/gorilla/securecookie",
@@ -703,6 +845,7 @@ def go_dependencies():
         sum = "h1:dygLcbEBA+t/P7ck6a8AkXv6juQ4cK0RHBoh32jxhHM=",
         version = "v2.16.2",
     )
+
     go_repository(
         name = "com_github_hailocab_go_hostpool",
         importpath = "github.com/hailocab/go-hostpool",
@@ -715,12 +858,25 @@ def go_dependencies():
         sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
         version = "v0.5.1",
     )
+    go_repository(
+        name = "com_github_hashicorp_hcl_v2",
+        importpath = "github.com/hashicorp/hcl/v2",
+        sum = "h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc=",
+        version = "v2.13.0",
+    )
+
     go_repository(
         name = "com_github_hdrhistogram_hdrhistogram_go",
         importpath = "github.com/HdrHistogram/hdrhistogram-go",
         sum = "h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=",
         version = "v1.1.2",
     )
+    go_repository(
+        name = "com_github_huandu_xstrings",
+        importpath = "github.com/huandu/xstrings",
+        sum = "h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=",
+        version = "v1.3.3",
+    )
 
     go_repository(
         name = "com_github_iancoleman_strcase",
@@ -738,8 +894,8 @@ def go_dependencies():
     go_repository(
         name = "com_github_imdario_mergo",
        importpath = "github.com/imdario/mergo",
-        sum = "h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=",
-        version = "v0.3.6",
+        sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=",
+        version = "v0.3.11",
     )
     go_repository(
         name = "com_github_inconshreveable_mousetrap",
@@ -900,6 +1056,13 @@ def go_dependencies():
         sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=",
         version = "v0.2.0",
     )
+    go_repository(
+        name = "com_github_kylelemons_godebug",
+        importpath = "github.com/kylelemons/godebug",
+        sum = "h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=",
+        version = "v1.1.0",
+    )
+
     go_repository(
         name = "com_github_labstack_echo_v4",
         importpath = "github.com/labstack/echo/v4",
@@ -938,6 +1101,37 @@ def go_dependencies():
         sum = "h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=",
         version = "v0.7.7",
     )
+    go_repository(
+        name = "com_github_masterminds_goutils",
+        importpath = "github.com/Masterminds/goutils",
+        sum = "h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=",
+        version = "v1.1.1",
+    )
+    go_repository(
+        name = "com_github_masterminds_semver",
+        importpath = "github.com/Masterminds/semver",
+        sum = "h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=",
+        version = "v1.5.0",
+    )
+    go_repository(
+        name = "com_github_masterminds_semver_v3",
+        importpath = "github.com/Masterminds/semver/v3",
+        sum = "h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=",
+        version = "v3.2.0",
+    )
+    go_repository(
+        name = "com_github_masterminds_sprig_v3",
+        importpath = "github.com/Masterminds/sprig/v3",
+        sum = "h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=",
+        version = "v3.2.3",
+    )
+    go_repository(
+        name = "com_github_mattermost_xml_roundtrip_validator",
+        importpath = "github.com/mattermost/xml-roundtrip-validator",
+        sum = "h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU=",
+        version = "v0.1.0",
+    )
+
     go_repository(
         name = "com_github_mattn_go_colorable",
         importpath = "github.com/mattn/go-colorable",
@@ -967,8 +1161,8 @@ def go_dependencies():
     go_repository(
         name = "com_github_mattn_go_sqlite3",
         importpath = "github.com/mattn/go-sqlite3",
-        sum = "h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0=",
-        version = "v1.14.12",
+        sum = "h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=",
+        version = "v1.14.17",
     )
 
     go_repository(
@@ -977,6 +1171,30 @@ def go_dependencies():
         sum = "h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=",
         version = "v1.0.4",
     )
+    go_repository(
+        name = "com_github_mitchellh_copystructure",
+        importpath = "github.com/mitchellh/copystructure",
+        sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=",
+        version = "v1.0.0",
+    )
+    go_repository(
+        name = "com_github_mitchellh_go_wordwrap",
+        importpath = "github.com/mitchellh/go-wordwrap",
+        sum = "h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=",
+        version = "v0.0.0-20150314170334-ad45545899c7",
+    )
+    go_repository(
+        name = "com_github_mitchellh_mapstructure",
+        importpath = "github.com/mitchellh/mapstructure",
+        sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=",
+        version = "v1.5.0",
+    )
+    go_repository(
+        name = "com_github_mitchellh_reflectwalk",
+        importpath = "github.com/mitchellh/reflectwalk",
+        sum = "h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=",
+        version = "v1.0.0",
+    )
 
     go_repository(
         name = "com_github_moby_spdystream",
@@ -1040,6 +1258,12 @@ def go_dependencies():
         sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=",
         version = "v0.0.0-20200227124842-a10e7caefd8e",
     )
+    go_repository(
+        name = "com_github_oklog_run",
+        importpath = "github.com/oklog/run",
+        sum = "h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=",
+        version = "v1.1.0",
+    )
 
     go_repository(
         name = "com_github_olekukonko_tablewriter",
@@ -1092,6 +1316,12 @@ def go_dependencies():
         sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
         version = "v2.0.1+incompatible",
     )
+    go_repository(
+        name = "com_github_pkg_diff",
+        importpath = "github.com/pkg/diff",
+        sum = "h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=",
+        version = "v0.0.0-20210226163009-20ebb0f2a09e",
+    )
 
     go_repository(
         name = "com_github_pkg_errors",
@@ -1182,6 +1412,13 @@ def go_dependencies():
         sum = "h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=",
         version = "v1.10.0",
     )
+    go_repository(
+        name = "com_github_russellhaering_goxmldsig",
+        importpath = "github.com/russellhaering/goxmldsig",
+        sum = "h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys=",
+        version = "v1.4.0",
+    )
+
     go_repository(
         name = "com_github_russross_blackfriday_v2",
         importpath = "github.com/russross/blackfriday/v2",
@@ -1194,6 +1431,18 @@ def go_dependencies():
         sum = "h1:gD4vkYmuoWVgdV6UwI3tPo9MtMfVoIRY+Xn9919SJBg=",
         version = "v0.0.0-20190219015601-e8b6b52668fe",
     )
+    go_repository(
+        name = "com_github_sergi_go_diff",
+        importpath = "github.com/sergi/go-diff",
+        sum = "h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=",
+        version = "v1.0.0",
+    )
+    go_repository(
+        name = "com_github_shopspring_decimal",
+        importpath = "github.com/shopspring/decimal",
+        sum = "h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=",
+        version = "v1.2.0",
+    )
 
     go_repository(
         name = "com_github_shurcool_go",
@@ -1252,6 +1501,12 @@ def go_dependencies():
         sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
         version = "v0.0.0-20180118202830-f09979ecbc72",
     )
+    go_repository(
+        name = "com_github_spf13_cast",
+        importpath = "github.com/spf13/cast",
+        sum = "h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=",
+        version = "v1.4.1",
+    )
 
     go_repository(
         name = "com_github_spf13_cobra",
@@ -1376,6 +1631,18 @@ def go_dependencies():
         sum = "h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=",
         version = "v1.2.1",
     )
+    go_repository(
+        name = "com_github_vmihailenco_msgpack_v4",
+        importpath = "github.com/vmihailenco/msgpack/v4",
+        sum = "h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=",
+        version = "v4.3.12",
+    )
+    go_repository(
+        name = "com_github_vmihailenco_tagparser",
+        importpath = "github.com/vmihailenco/tagparser",
+        sum = "h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=",
+        version = "v0.1.1",
+    )
 
     go_repository(
         name = "com_github_wk8_go_ordered_map_v2",
@@ -1422,6 +1689,19 @@ def go_dependencies():
         sum = "h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=",
         version = "v1.4.13",
     )
+    go_repository(
+        name = "com_github_zclconf_go_cty",
+        importpath = "github.com/zclconf/go-cty",
+        sum = "h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA=",
+        version = "v1.8.0",
+    )
+    go_repository(
+        name = "com_github_zclconf_go_cty_debug",
+        importpath = "github.com/zclconf/go-cty-debug",
+        sum = "h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=",
+        version = "v0.0.0-20191215020915-b22d67c1ba0b",
+    )
+
     go_repository(
         name = "com_google_cloud_go",
         importpath = "cloud.google.com/go",
@@ -2185,6 +2465,7 @@ def go_dependencies():
         sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
         version = "v0.9.1",
     )
+
     go_repository(
         name = "in_gopkg_resty_v1",
         importpath = "gopkg.in/resty.v1",
@@ -2216,6 +2497,37 @@ def go_dependencies():
         sum = "h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=",
         version = "v3.0.1",
     )
+    go_repository(
+        name = "io_ariga_atlas",
+        importpath = "ariga.io/atlas",
+        sum = "h1:Tq2DRB39ZHScIwWACjPKLv5oEErv7zv6PBb5RTz5CKA=",
+        version = "v0.10.2-0.20230427182402-87a07dfb83bf",
+    )
+    go_repository(
+        name = "io_entgo_ent",
+        importpath = "entgo.io/ent",
+        sum = "h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=",
+        version = "v0.12.3",
+    )
+    go_repository(
+        name = "io_etcd_go_etcd_api_v3",
+        importpath = "go.etcd.io/etcd/api/v3",
+        sum = "h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=",
+        version = "v3.5.9",
+    )
+    go_repository(
+        name = "io_etcd_go_etcd_client_pkg_v3",
+        importpath = "go.etcd.io/etcd/client/pkg/v3",
+        sum = "h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=",
+        version = "v3.5.9",
+    )
+    go_repository(
+        name = "io_etcd_go_etcd_client_v3",
+        importpath = "go.etcd.io/etcd/client/v3",
+        sum = "h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=",
+        version = "v3.5.9",
+    )
+
     go_repository(
         name = "io_k8s_api",
         importpath = "k8s.io/api",
@@ -2438,8 +2750,8 @@ def go_dependencies():
     go_repository(
         name = "org_golang_google_api",
         importpath = "google.golang.org/api",
-        sum = "h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=",
-        version = "v0.126.0",
+        sum = "h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=",
+        version = "v0.134.0",
     )
 
     go_repository(
@@ -2463,8 +2775,8 @@ def go_dependencies():
     go_repository(
         name = "org_golang_google_genproto_googleapis_bytestream",
        importpath = "google.golang.org/genproto/googleapis/bytestream",
-        sum = "h1:g3hIDl0jRNd9PPTs2uBzYuaD5mQuwOkZY0vSc0LR32o=",
-        version = "v0.0.0-20230530153820-e85fd2cbaebc",
+        sum = "h1:gm8vsVR64Jx1GxHY8M+p8YA2bxU/H/lymcutB2l7l9s=",
+        version = "v0.0.0-20230720185612-659f7aaaa771",
     )
 
     go_repository(
@@ -2552,8 +2864,8 @@ def go_dependencies():
     go_repository(
         name = "org_golang_x_sync",
         importpath = "golang.org/x/sync",
sum = "h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=", - version = "v0.2.0", + sum = "h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=", + version = "v0.3.0", ) go_repository( name = "org_golang_x_sys", @@ -2582,8 +2894,8 @@ def go_dependencies(): go_repository( name = "org_golang_x_tools", importpath = "golang.org/x/tools", - sum = "h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=", - version = "v0.8.0", + sum = "h1:0wxTF6pSjIIhNt7mo9GvjDfzyCOiWhmICgtO/Ah948s=", + version = "v0.8.1-0.20230428195545-5283a0178901", ) go_repository( name = "org_golang_x_xerrors", diff --git a/go.mod b/go.mod index 1ab1f00e..47ebea5d 100644 --- a/go.mod +++ b/go.mod @@ -2,27 +2,36 @@ module go.resf.org/peridot go 1.20 -replace github.com/smartystreets/assertions v1.15.1 => github.com/smarty/assertions v1.15.1 +replace ( + github.com/coreos/bbolt v1.3.7 => go.etcd.io/bbolt v1.3.7 + github.com/smartystreets/assertions v1.15.1 => github.com/smarty/assertions v1.15.1 +) require ( github.com/bazelbuild/bazel-watcher v0.23.7 + github.com/dexidp/dex v0.0.0-20230804184036-a9d1fd31c329 + github.com/gogo/status v1.1.1 github.com/googleapis/api-linter v1.56.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 github.com/prometheus/client_golang v1.16.0 + github.com/sirupsen/logrus v1.9.3 github.com/temporalio/temporalite v0.3.0 github.com/urfave/cli/v2 v2.25.7 github.com/wk8/go-ordered-map/v2 v2.1.8 go.ciq.dev/pika v0.0.0-20230819201750-737c3e8f413d go.starlark.net v0.0.0-20230814145427-12f4cb8177e4 + golang.org/x/mod v0.10.0 google.golang.org/grpc v1.57.0 + google.golang.org/protobuf v1.31.0 k8s.io/api v0.28.0 k8s.io/apimachinery v0.28.0 k8s.io/cli-runtime v0.28.0 ) require ( + ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf // indirect bitbucket.org/creachadair/stringset v0.0.11 // indirect cloud.google.com/go v0.110.6 // indirect cloud.google.com/go/compute v1.23.0 // indirect @@ -30,10 +39,20 @@ require ( cloud.google.com/go/iam v1.1.1 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect cloud.google.com/go/storage v1.30.1 // indirect + entgo.io/ent v0.12.3 // indirect + github.com/AppsFlyer/go-sundheit v0.5.0 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/agext/levenshtein v1.2.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/apache/thrift v0.16.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/aws/aws-sdk-go v1.44.41 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/beevik/etree v1.2.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -43,21 +62,30 @@ require ( github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/coreos/go-oidc/v3 v3.1.0 // indirect + github.com/coreos/go-oidc/v3 v3.6.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect 
github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dexidp/dex/api/v2 v2.1.1-0.20230804184036-a9d1fd31c329 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsevents v0.1.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gertd/go-pluralize v0.2.1 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/go-ldap/ldap/v3 v3.4.5 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/gocql/gocql v1.2.0 // indirect github.com/gogo/gateway v1.1.0 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/gogo/status v1.1.1 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang/glog v1.1.0 // indirect @@ -69,14 +97,20 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/gorilla/handlers v1.5.1 // indirect + github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/huandu/xstrings v1.3.3 // indirect github.com/iancoleman/strcase v0.3.0 // indirect + github.com/imdario/mergo v0.3.11 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jaschaephraim/lrserver v0.0.0-20171129202958-50d19f603f71 // indirect github.com/jhump/protoreflect v1.15.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -91,13 +125,19 @@ require ( github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect + github.com/mattn/go-sqlite3 v1.14.17 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/olivere/elastic/v7 v7.0.32 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ 
@@ -112,10 +152,13 @@ require (
 	github.com/rivo/uniseg v0.4.3 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/russellhaering/goxmldsig v1.4.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/smarty/assertions v1.15.1 // indirect
 	github.com/smartystreets/assertions v1.15.1 // indirect
+	github.com/spf13/cast v1.4.1 // indirect
+	github.com/spf13/cobra v1.7.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/stretchr/objx v0.5.0 // indirect
@@ -130,6 +173,10 @@ require (
 	github.com/valyala/fasttemplate v1.2.1 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 // indirect
+	github.com/zclconf/go-cty v1.8.0 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.9 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.9 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0 // indirect
 	go.opentelemetry.io/otel v1.7.0 // indirect
@@ -155,21 +202,19 @@ require (
 	go.uber.org/zap v1.23.0 // indirect
 	golang.org/x/crypto v0.11.0 // indirect
 	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
-	golang.org/x/mod v0.10.0 // indirect
 	golang.org/x/net v0.13.0 // indirect
 	golang.org/x/oauth2 v0.10.0 // indirect
-	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.11.0 // indirect
 	golang.org/x/text v0.12.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
+	golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-	google.golang.org/api v0.126.0 // indirect
+	google.golang.org/api v0.134.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/validator.v2 v2.0.1 // indirect
diff --git a/go.sum b/go.sum
index fa22feae..68e8a7a6 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf h1:Tq2DRB39ZHScIwWACjPKLv5oEErv7zv6PBb5RTz5CKA=
+ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf/go.mod h1:+TR129FJZ5Lvzms6dvCeGWh1yR6hMvmXBhug4hrNIGk=
 bitbucket.org/creachadair/stringset v0.0.11 h1:6Sv4CCv14Wm+OipW4f3tWOb0SQVpBDLW0knnJqUnmZ8=
 bitbucket.org/creachadair/stringset v0.0.11/go.mod h1:wh0BHewFe+j0HrzWz7KcGbSNpFzWwnpmgPRlB57U5jU=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -71,27 +73,51 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
 cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
 cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+entgo.io/ent v0.12.3 h1:N5lO2EOrHpCH5HYfiMOCHYbo+oh5M8GjT0/cx5x6xkk=
+entgo.io/ent v0.12.3/go.mod h1:AigGGx+tbrBBYHAzGOg8ND661E5cxx1Uiu5o/otJ6Yg=
+github.com/AppsFlyer/go-sundheit v0.5.0 h1:/VxpyigCfJrq1r97mn9HPiAB2qrhcTFHwNIIDr15CZM=
+github.com/AppsFlyer/go-sundheit v0.5.0/go.mod h1:2ZM0BnfqT/mljBQO224VbL5XH06TgWuQ6Cn+cTtCpTY=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
+github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
 github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
 github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
 github.com/aws/aws-sdk-go v1.44.41 h1:FNW3Tb8vKvXLZ7lzGlg/dCAXhK4RC5fyFewD11oJhUM=
 github.com/aws/aws-sdk-go v1.44.41/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
 github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
 github.com/bazelbuild/bazel-watcher v0.23.7 h1:EfJzkMxJuNBGMVdEvkhiW7pAMwhaegbmAMaFCjLjyTw=
 github.com/bazelbuild/bazel-watcher v0.23.7/go.mod h1:kLJ66pDAR3DVpTPF8Fw3kLQXmxpMT5ll7rbpsFuNmNs=
+github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
+github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw=
+github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
 github.com/benbjohnson/clock v0.0.0-20160125162948-a620c1cc9866/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
@@ -141,8 +167,12 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
-github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw=
-github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
+github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=
+github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -150,6 +180,10 @@ github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dexidp/dex v0.0.0-20230804184036-a9d1fd31c329 h1:y86PLEHr+sm6OyWS37VGPcGBaj+u51lltfrLPCcg3KA=
+github.com/dexidp/dex v0.0.0-20230804184036-a9d1fd31c329/go.mod h1:YakYzHHcygyboa3c7Zkd8XRiMG1nPSKaexbmbNifmmA=
+github.com/dexidp/dex/api/v2 v2.1.1-0.20230804184036-a9d1fd31c329 h1:2xadZzg7cmAXrlTYRmBGXXsnLwr+vYYknW9IxzU3Vtg=
+github.com/dexidp/dex/api/v2 v2.1.1-0.20230804184036-a9d1fd31c329/go.mod h1:GrNNLJ+Un8WtvFwpiwU+eMIGTQ+nysZ4GSQI+mKffNU=
 github.com/dgryski/go-farm v0.0.0-20140601200337-fc41e106ee0e/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
@@ -169,22 +203,33 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fergusstrange/embedded-postgres v1.23.0 h1:ZYRD89nammxQDWDi6taJE2CYjDuAoVc1TpEqRIYQryc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/fsnotify/fsevents v0.1.1 h1:/125uxJvvoSDDBPen6yUZbil8J9ydKZnnl3TWWmvnkw=
 github.com/fsnotify/fsevents v0.1.1/go.mod h1:+d+hS27T6k5J8CRaPLKFgwKYcpS7GwW3Ule9+SC2ZRc=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlLgiA=
 github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
+github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-ldap/ldap/v3 v3.4.5 h1:ekEKmaDrpvR2yf5Nc/DClsGG9lAmdDixe44mLzlW5r8=
+github.com/go-ldap/ldap/v3 v3.4.5/go.mod h1:bMGIq3AGbytbaMwf8wdv5Phdxz0FWHTIYMSzyrYgnQs=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -195,11 +240,16 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
 github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
+github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/gocql/gocql v1.2.0 h1:TZhsCd7fRuye4VyHr3WCvWwIQaZUmjsqnSIXK9FcVCE=
 github.com/gocql/gocql v1.2.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0=
 github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic=
 github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -302,6 +352,7 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
 github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -309,8 +360,8 @@ github.com/googleapis/api-linter v1.56.1 h1:bxA1SNXfBl5gERSm/ED5m9eqOfDY1YlNmfNi
 github.com/googleapis/api-linter v1.56.1/go.mod h1:zKhUtzO+U44FQ+qKUURbKHL7WpiikvCZ7Q7hhvmZ/mE=
 github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
 github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -318,11 +369,15 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
 github.com/googleapis/gax-go/v2
v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -342,10 +397,18 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jaschaephraim/lrserver v0.0.0-20171129202958-50d19f603f71 h1:24NdJ5N6gtrcoeS4JwLMeruKFmg20QdF/5UnX5S/j18= github.com/jaschaephraim/lrserver v0.0.0-20171129202958-50d19f603f71/go.mod h1:ozZLfjiLmXytkIUh200wMeuoQJ4ww06wN+KZtFP6j3g= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -358,6 +421,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.5 
h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -384,11 +448,14 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/labstack/echo/v4 v4.9.1 h1:GliPYSpzGKlyOhqIbG8nmHBo3i1saKWFOgh41AN3b+Y= github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo= github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= @@ -401,6 +468,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU= +github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -413,11 +482,18 @@ github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 
h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -428,6 +504,8 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/olivere/elastic/v7 v7.0.32 h1:R7CXvbu8Eq+WlsLgxmKVKPox0oOwAE/2T9Si5BnvK6E= @@ -437,6 +515,7 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -486,10 +565,17 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys= 
+github.com/russellhaering/goxmldsig v1.4.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/samuel/go-thrift v0.0.0-20190219015601-e8b6b52668fe/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.0.2-0.20170726183946-abee6f9b0679/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -500,12 +586,18 @@ github.com/smarty/assertions v1.15.1 h1:812oFiXI+G55vxsFf+8bIZ1ux30qtkdqzKbEFwyX github.com/smarty/assertions v1.15.1/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -547,6 +639,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= @@ -561,8 +655,16 @@ github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= go.ciq.dev/pika v0.0.0-20230819201750-737c3e8f413d h1:bHcWrRI7hU9LQLAuQh9RtWQhRBi3S8f/JcCBOMtiLSU= go.ciq.dev/pika v0.0.0-20230819201750-737c3e8f413d/go.mod h1:FUnF19p1SuyHy77srykZo5s3dFikTj5A/f1stmcotCA= +go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= +go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= +go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= +go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -640,10 +742,13 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -689,6 +794,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -714,7 +820,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -743,6 +848,9 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -781,8 +889,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -865,11 +974,17 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -880,6 +995,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -949,8 +1067,9 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 h1:0wxTF6pSjIIhNt7mo9GvjDfzyCOiWhmICgtO/Ah948s= +golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1004,8 +1123,8 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api 
v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= +google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1167,12 +1286,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/validator.v2 v2.0.0-20200605151824-2b28d334fa05/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= diff --git a/vendor.go b/vendor.go index 81d04e96..a6e77ebe 100644 --- a/vendor.go +++ b/vendor.go @@ -24,4 +24,6 @@ import ( _ "github.com/bazelbuild/bazel-watcher/cmd/ibazel" // temporalite _ "github.com/temporalio/temporalite/cmd/temporalite" + // dex + _ "github.com/dexidp/dex/cmd/dex" ) diff --git a/vendor/ariga.io/atlas/LICENSE b/vendor/ariga.io/atlas/LICENSE new file mode 100644 index 00000000..7a4a3ea2 --- /dev/null +++ b/vendor/ariga.io/atlas/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/ariga.io/atlas/schemahcl/BUILD b/vendor/ariga.io/atlas/schemahcl/BUILD new file mode 100644 index 00000000..a0c7c7f5 --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "schemahcl", + srcs = [ + "context.go", + "extension.go", + "hcl.go", + "opts.go", + "spec.go", + "stdlib.go", + "types.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/schemahcl", + importpath = "ariga.io/atlas/schemahcl", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/github.com/go-openapi/inflect", + "//vendor/github.com/hashicorp/hcl/v2:hcl", + "//vendor/github.com/hashicorp/hcl/v2/ext/tryfunc", + "//vendor/github.com/hashicorp/hcl/v2/gohcl", + "//vendor/github.com/hashicorp/hcl/v2/hclparse", + "//vendor/github.com/hashicorp/hcl/v2/hclsyntax", + "//vendor/github.com/hashicorp/hcl/v2/hclwrite", + "//vendor/github.com/zclconf/go-cty/cty", + "//vendor/github.com/zclconf/go-cty/cty/convert", + "//vendor/github.com/zclconf/go-cty/cty/function", + "//vendor/github.com/zclconf/go-cty/cty/function/stdlib", + "//vendor/github.com/zclconf/go-cty/cty/gocty", + ], +) diff --git a/vendor/ariga.io/atlas/schemahcl/context.go b/vendor/ariga.io/atlas/schemahcl/context.go new file mode 100644 index 00000000..1ea38708 --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/context.go @@ -0,0 +1,464 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schemahcl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// blockVar is an HCL resource that defines an input variable to the Atlas DDL document. +type blockVar struct { + Name string `hcl:",label"` + Type cty.Value `hcl:"type"` + Default cty.Value `hcl:"default,optional"` + Description string `hcl:"description,optional"` +} + +// setInputVals sets the input values into the evaluation context. HCL documents can define +// input variables in the document body by defining "variable" blocks: +// +// variable "name" { +// type = string // also supported: number, bool +// default = "rotemtam" +// } +func (s *State) setInputVals(ctx *hcl.EvalContext, body hcl.Body, input map[string]cty.Value) error { + var doc struct { + Vars []*blockVar `hcl:"variable,block"` + Remain hcl.Body `hcl:",remain"` + } + if diag := gohcl.DecodeBody(body, ctx, &doc); diag.HasErrors() { + return diag + } + ctxVars := make(map[string]cty.Value) + for _, v := range doc.Vars { + var vv cty.Value + switch iv, ok := input[v.Name]; { + case !v.Type.Type().IsCapsuleType(): + return fmt.Errorf( + "invalid type %q for variable %q. Valid types are: string, number, bool, list, map, or set", + v.Type.AsString(), v.Name, + ) + case ok: + vv = iv + case v.Default != cty.NilVal: + vv = v.Default + default: + return fmt.Errorf("missing value for required variable %q", v.Name) + } + vt := v.Type.EncapsulatedValue().(*cty.Type) + // In case the input value is a primitive type and the expected type is a list, + // wrap it as a list because the variable type may not be known to the caller. 
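+	// A hedged illustration of this step (the variable name and value are
+	// hypothetical): given
+	//
+	//	variable "tags" {
+	//		type = list(string)
+	//	}
+	//
+	// and a caller input of cty.StringVal("dev"), vt is list(string) and
+	// vv's type equals its element type, so vv becomes
+	// cty.ListVal([]cty.Value{cty.StringVal("dev")}) before conversion.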
+ if vt.IsListType() && vv.Type().Equals(vt.ElementType()) { + vv = cty.ListVal([]cty.Value{vv}) + } + cv, err := convert.Convert(vv, *vt) + if err != nil { + return fmt.Errorf("variable %q: %w", v.Name, err) + } + ctxVars[v.Name] = cv + } + mergeCtxVar(ctx, ctxVars) + return nil +} + +// evalReferences evaluates data blocks. +func (s *State) evalReferences(ctx *hcl.EvalContext, body *hclsyntax.Body) error { + type node struct { + addr [3]string + edges func() []hcl.Traversal + value func() (cty.Value, error) + } + var ( + initblk []*node + nodes = make(map[[3]string]*node) + blocks = make(hclsyntax.Blocks, 0, len(body.Blocks)) + ) + for _, b := range body.Blocks { + switch b := b; { + case b.Type == dataBlock: + if len(b.Labels) < 2 { + return fmt.Errorf("data block %q must have exactly 2 labels", b.Type) + } + h, ok := s.config.datasrc[b.Labels[0]] + if !ok { + return fmt.Errorf("missing data source handler for %q", b.Labels[0]) + } + // Data references are combined from + // "data", "source" and "name" labels. + addr := [3]string{dataBlock, b.Labels[0], b.Labels[1]} + nodes[addr] = &node{ + addr: addr, + value: func() (cty.Value, error) { return h(ctx, b) }, + edges: func() []hcl.Traversal { return bodyVars(b.Body) }, + } + case b.Type == localsBlock: + for k, v := range b.Body.Attributes { + k, v := k, v + // Local references are combined from + // "local" and "name" labels. + addr := [3]string{localRef, k, ""} + nodes[addr] = &node{ + addr: addr, + edges: func() []hcl.Traversal { return hclsyntax.Variables(v.Expr) }, + value: func() (cty.Value, error) { + v, diags := v.Expr.Value(ctx) + if diags.HasErrors() { + return cty.NilVal, diags + } + return v, nil + }, + } + } + case s.config.initblk[b.Type] != nil: + if len(b.Labels) != 0 { + return fmt.Errorf("init block %q cannot have labels", b.Type) + } + addr := [3]string{b.Type, "", ""} + if nodes[addr] != nil { + return fmt.Errorf("duplicate init block %q", b.Type) + } + h := s.config.initblk[b.Type] + n := &node{ + addr: addr, + value: func() (cty.Value, error) { return h(ctx, b) }, + edges: func() []hcl.Traversal { return bodyVars(b.Body) }, + } + nodes[addr] = n + initblk = append(initblk, n) + default: + blocks = append(blocks, b) + } + } + var ( + visit func(n *node) error + visited = make(map[*node]bool) + progress = make(map[*node]bool) + ) + visit = func(n *node) error { + if visited[n] { + return nil + } + if progress[n] { + addr := n.addr[:] + for len(addr) > 0 && addr[len(addr)-1] == "" { + addr = addr[:len(addr)-1] + } + return fmt.Errorf("cyclic reference to %q", strings.Join(addr, ".")) + } + progress[n] = true + for _, e := range n.edges() { + var addr [3]string + switch root := e.RootName(); { + case root == localRef && len(e) == 2: + addr = [3]string{localRef, e[1].(hcl.TraverseAttr).Name, ""} + case root == dataBlock && len(e) > 2: + addr = [3]string{dataBlock, e[1].(hcl.TraverseAttr).Name, e[2].(hcl.TraverseAttr).Name} + case s.config.initblk[root] != nil && len(e) == 1: + addr = [3]string{root, "", ""} + } + // Unrecognized reference. 
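+			// A hedged illustration of the address scheme above (the label
+			// names are hypothetical): a traversal data.sql.users resolves to
+			// [3]string{"data", "sql", "users"}, and local.name resolves to
+			// [3]string{"local", "name", ""}; any other traversal is left as
+			// the zero address and skipped below.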
+ if nodes[addr] == nil { + continue + } + if err := visit(nodes[addr]); err != nil { + return err + } + } + delete(progress, n) + v, err := n.value() + if err != nil { + return err + } + switch n.addr[0] { + case dataBlock: + data := make(map[string]cty.Value) + if vv, ok := ctx.Variables[dataBlock]; ok { + data = vv.AsValueMap() + } + src := make(map[string]cty.Value) + if vv, ok := data[n.addr[1]]; ok { + src = vv.AsValueMap() + } + src[n.addr[2]] = v + data[n.addr[1]] = cty.ObjectVal(src) + ctx.Variables[dataBlock] = cty.ObjectVal(data) + case localRef: + locals := make(map[string]cty.Value) + if vv, ok := ctx.Variables[localRef]; ok { + locals = vv.AsValueMap() + } + locals[n.addr[1]] = v + ctx.Variables[localRef] = cty.ObjectVal(locals) + default: + ctx.Variables[n.addr[0]] = v + } + return nil + } + // Evaluate init-blocks first, + // to give them higher precedence. + for _, n := range initblk { + if err := visit(n); err != nil { + return err + } + } + for _, n := range nodes { + if err := visit(n); err != nil { + return err + } + } + body.Blocks = blocks + return nil +} + +func mergeCtxVar(ctx *hcl.EvalContext, vals map[string]cty.Value) { + v, ok := ctx.Variables[varRef] + if ok { + v.ForEachElement(func(key cty.Value, val cty.Value) (stop bool) { + vals[key.AsString()] = val + return false + }) + } + ctx.Variables[varRef] = cty.ObjectVal(vals) +} + +func setBlockVars(ctx *hcl.EvalContext, b *hclsyntax.Body) (*hcl.EvalContext, error) { + defs := defRegistry(b) + vars, err := blockVars(b.Blocks, "", defs) + if err != nil { + return nil, err + } + if ctx.Variables == nil { + ctx.Variables = make(map[string]cty.Value) + } + for k, v := range vars { + ctx.Variables[k] = v + } + return ctx, nil +} + +func blockVars(blocks hclsyntax.Blocks, parentAddr string, defs *blockDef) (map[string]cty.Value, error) { + vars := make(map[string]cty.Value) + for name, def := range defs.children { + v := make(map[string]cty.Value) + qv := make(map[string]map[string]cty.Value) + blocks := blocksOfType(blocks, name) + if len(blocks) == 0 { + vars[name] = cty.NullVal(def.asCty()) + continue + } + var unlabeled int + for _, blk := range blocks { + qualifier, blkName := blockName(blk) + if blkName == "" { + blkName = strconv.Itoa(unlabeled) + unlabeled++ + } + attrs := attrMap(blk.Body.Attributes) + // Fill missing attributes with zero values. + for n := range def.fields { + if _, ok := attrs[n]; !ok { + attrs[n] = cty.NullVal(ctyNilType) + } + } + self := addr(parentAddr, name, blkName, qualifier) + attrs["__ref"] = cty.StringVal(self) + varMap, err := blockVars(blk.Body.Blocks, self, def) + if err != nil { + return nil, err + } + // Merge children blocks in. 
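+			// Each child block type's object is stored as an attribute on the
+			// parent, keyed by the block-type name, so a reference such as
+			// $table.users.$column.id (hypothetical labels) can traverse from
+			// parent to child.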
+ for k, v := range varMap { + attrs[k] = v + } + switch { + case qualifier != "": + obj := cty.ObjectVal(attrs) + if _, ok := qv[qualifier]; !ok { + qv[qualifier] = make(map[string]cty.Value) + } + qv[qualifier][blkName] = obj + obj = cty.ObjectVal(qv[qualifier]) + v[qualifier] = obj + default: + v[blkName] = cty.ObjectVal(attrs) + } + } + if len(v) > 0 { + vars[name] = cty.ObjectVal(v) + } + } + return vars, nil +} + +func addr(parentAddr, typeName, blkName, qualifier string) string { + var b strings.Builder + if parentAddr != "" { + b.WriteString(parentAddr) + b.WriteString(".") + } + b.WriteByte('$') + b.WriteString(typeName) + for _, s := range []string{qualifier, blkName} { + switch { + case s == "": + case validIdent(s): + b.WriteString(".") + b.WriteString(s) + default: + b.WriteString("[") + b.WriteString(strconv.Quote(s)) + b.WriteString("]") + } + } + return b.String() +} + +// validIdent reports if the given string can +// be used as an identifier in a reference. +func validIdent(s string) bool { + _, err := cty.ParseNumberVal(s) + return err == nil || hclsyntax.ValidIdentifier(s) +} + +func blockName(blk *hclsyntax.Block) (qualifier string, name string) { + switch len(blk.Labels) { + case 0: + case 1: + name = blk.Labels[0] + default: + qualifier = blk.Labels[0] + name = blk.Labels[1] + } + return +} + +func blocksOfType(blocks hclsyntax.Blocks, typeName string) []*hclsyntax.Block { + var out []*hclsyntax.Block + for _, block := range blocks { + if block.Type == typeName { + out = append(out, block) + } + } + return out +} + +func attrMap(attrs hclsyntax.Attributes) map[string]cty.Value { + out := make(map[string]cty.Value) + for _, v := range attrs { + value, diag := v.Expr.Value(nil) + if diag.HasErrors() { + continue + } + out[v.Name] = value + } + return out +} + +var ( + ctyNilType = cty.Capsule("type", reflect.TypeOf(cty.NilType)) + ctyTypeSpec = cty.Capsule("type", reflect.TypeOf(Type{})) + ctyRefType = cty.Capsule("ref", reflect.TypeOf(Ref{})) + ctyRawExpr = cty.Capsule("raw", reflect.TypeOf(RawExpr{})) +) + +// Built-in blocks. +const ( + varBlock = "variable" + dataBlock = "data" + localsBlock = "locals" + forEachAttr = "for_each" + eachRef = "each" + varRef = "var" + localRef = "local" +) + +// defRegistry returns a tree of blockDef structs representing the schema of the +// blocks in the *hclsyntax.Body. The returned fields and children of each type +// are an intersection of all existing blocks of the same type. +func defRegistry(b *hclsyntax.Body) *blockDef { + reg := &blockDef{ + fields: make(map[string]struct{}), + children: make(map[string]*blockDef), + } + for _, blk := range b.Blocks { + // variable definition blocks are available in the HCL source but not reachable by reference. + if blk.Type == varBlock { + continue + } + reg.child(extractDef(blk, reg)) + } + return reg +} + +// blockDef describes a type of block in the HCL document. +type blockDef struct { + name string + fields map[string]struct{} + parent *blockDef + children map[string]*blockDef +} + +// child updates the definition for the child type of the blockDef. +func (t *blockDef) child(c *blockDef) { + ex, ok := t.children[c.name] + if !ok { + t.children[c.name] = c + return + } + for f := range c.fields { + ex.fields[f] = struct{}{} + } + for _, c := range c.children { + ex.child(c) + } +} + +// asCty returns a cty.Type representing the blockDef. 
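+// A sketch of the mapping (block-type and attribute names hypothetical):
+// a definition with one attribute "size" and one child type "column" that
+// has an attribute "type" is represented as
+//
+//	cty.Object(map[string]cty.Type{
+//		"size":  ctyNilType,
+//		"__ref": cty.String,
+//		"column": cty.Object(map[string]cty.Type{
+//			"type":  ctyNilType,
+//			"__ref": cty.String,
+//		}),
+//	})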
+func (t *blockDef) asCty() cty.Type { + f := make(map[string]cty.Type) + for attr := range t.fields { + f[attr] = ctyNilType + } + f["__ref"] = cty.String + for _, c := range t.children { + f[c.name] = c.asCty() + } + return cty.Object(f) +} + +func extractDef(blk *hclsyntax.Block, parent *blockDef) *blockDef { + cur := &blockDef{ + name: blk.Type, + parent: parent, + fields: make(map[string]struct{}), + children: make(map[string]*blockDef), + } + for _, a := range blk.Body.Attributes { + cur.fields[a.Name] = struct{}{} + } + for _, c := range blk.Body.Blocks { + cur.child(extractDef(c, cur)) + } + return cur +} + +func bodyVars(b *hclsyntax.Body) (vars []hcl.Traversal) { + for _, attr := range b.Attributes { + vars = append(vars, hclsyntax.Variables(attr.Expr)...) + } + for _, b := range b.Blocks { + vars = append(vars, bodyVars(b.Body)...) + } + return +} diff --git a/vendor/ariga.io/atlas/schemahcl/extension.go b/vendor/ariga.io/atlas/schemahcl/extension.go new file mode 100644 index 00000000..499603df --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/extension.go @@ -0,0 +1,671 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schemahcl + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// Remainer is the interface that is implemented by types that can store +// additional attributes and children resources. +type Remainer interface { + // Remain returns a resource representing any extra children and attributes + // that are related to the struct but were not mapped to any of its fields. + Remain() *Resource +} + +// DefaultExtension can be embedded in structs that need basic default behavior. +// For instance, DefaultExtension implements Remainer, and has a private *Resource +// field that can store additional attributes and children that do not match the +// structs fields. +type DefaultExtension struct { + Extra Resource +} + +// Remain implements the Remainer interface. +func (d *DefaultExtension) Remain() *Resource { + return &d.Extra +} + +// Attr returns the Attr by the provided name and reports whether it was found. +func (d *DefaultExtension) Attr(name string) (*Attr, bool) { + return d.Extra.Attr(name) +} + +type registry map[string]any + +var ( + extensions = make(registry) + extensionsMu sync.RWMutex +) + +func (r registry) lookup(ext any) (string, bool) { + extensionsMu.RLock() + defer extensionsMu.RUnlock() + for k, v := range r { + if reflect.TypeOf(ext) == reflect.TypeOf(v) { + return k, true + } + } + return "", false +} + +// implementers returns a slice of the names of the extensions that implement i. +func (r registry) implementers(i reflect.Type) ([]string, error) { + if i.Kind() != reflect.Interface { + return nil, fmt.Errorf("schemahcl: expected interface got %s", i.Kind()) + } + var names []string + for name, typ := range r { + if reflect.TypeOf(typ).Implements(i) { + names = append(names, name) + } + } + return names, nil +} + +// Register records the type of ext in the global extension registry. +// If Register is called twice with the same name or if ext is nil, +// it panics. 
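+//
+// Usage sketch (the Table type here is hypothetical):
+//
+//	schemahcl.Register("table", &Table{})
+//
+// after which "table" blocks can be decoded into Table values via Resource.As.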
+func Register(name string, ext any) { + extensionsMu.Lock() + defer extensionsMu.Unlock() + if ext == nil { + panic("schemahcl: Register extension is nil") + } + if _, dup := extensions[name]; dup { + panic("schemahcl: Register called twice for type " + name) + } + extensions[name] = ext +} + +// As reads the attributes and children resources of the resource into the target struct. +func (r *Resource) As(target any) error { + if err := validateStructPtr(target); err != nil { + return err + } + return r.as(target) +} + +// As reads the attributes and children resources of the resource into the target struct. +func (r *Resource) as(target any) error { + existingAttrs, existingChildren := existingElements(r) + var seenName, seenQualifier bool + v := reflect.ValueOf(target).Elem() + for _, ft := range specFields(target) { + field := v.FieldByName(ft.Name) + switch { + case ft.isName() && !hasAttr(r, ft.tag): + if seenName { + return errors.New("schemahcl: extension must have only one isName field") + } + seenName = true + if field.Kind() != reflect.String { + return errors.New("schemahcl: extension isName field must be of type string") + } + field.SetString(r.Name) + case ft.isQualifier(): + if seenQualifier { + return errors.New("schemahcl: extension must have only one qualifier field") + } + seenQualifier = true + field.SetString(r.Qualifier) + case hasAttr(r, ft.tag): + attr, _ := r.Attr(ft.tag) + if err := setField(field, attr); err != nil { + return err + } + delete(existingAttrs, attr.K) + case ft.isInterfaceSlice(): + elem := field.Type().Elem() + impls, err := extensions.implementers(elem) + if err != nil { + return err + } + children := childrenOfType(r, impls...) + slc := reflect.MakeSlice(reflect.SliceOf(elem), 0, len(children)) + for _, c := range children { + typ, ok := extensions[c.Type] + if !ok { + return fmt.Errorf("extension %q not registered", c.Type) + } + n := reflect.New(reflect.TypeOf(typ).Elem()) + ext := n.Interface() + if err := c.as(ext); err != nil { + return err + } + slc = reflect.Append(slc, reflect.ValueOf(ext)) + } + field.Set(slc) + for _, i := range impls { + delete(existingChildren, i) + } + case ft.isInterface(): + impls, err := extensions.implementers(ft.Type) + if err != nil { + return err + } + children := childrenOfType(r, impls...) 
+			if len(children) == 0 {
+				continue
+			}
+			if len(children) > 1 {
+				return fmt.Errorf("more than one block implements %q", ft.Type)
+			}
+			c := children[0]
+			typ, ok := extensions[c.Type]
+			if !ok {
+				return fmt.Errorf("extension %q not registered", c.Type)
+			}
+			n := reflect.New(reflect.TypeOf(typ).Elem())
+			ext := n.Interface()
+			if err := c.as(ext); err != nil {
+				return err
+			}
+			field.Set(n)
+		case isResourceSlice(field.Type()):
+			if err := setChildSlice(field, childrenOfType(r, ft.tag)); err != nil {
+				return err
+			}
+			delete(existingChildren, ft.tag)
+		case isSingleResource(field.Type()):
+			c := childrenOfType(r, ft.tag)
+			if len(c) == 0 {
+				continue
+			}
+			var (
+				res = c[0]
+				n   reflect.Value
+			)
+			switch field.Type().Kind() {
+			case reflect.Struct:
+				n = reflect.New(field.Type())
+				ext := n.Interface()
+				if err := res.as(ext); err != nil {
+					return err
+				}
+				n = n.Elem()
+			case reflect.Pointer:
+				n = reflect.New(field.Type().Elem())
+				ext := n.Interface()
+				if err := res.as(ext); err != nil {
+					return err
+				}
+			}
+			field.Set(n)
+			delete(existingChildren, ft.tag)
+		}
+	}
+	rem, ok := target.(Remainer)
+	if !ok {
+		return nil
+	}
+	extras := rem.Remain()
+	for attrName := range existingAttrs {
+		attr, ok := r.Attr(attrName)
+		if !ok {
+			return fmt.Errorf("schemahcl: expected attr %q to exist", attrName)
+		}
+		extras.SetAttr(attr)
+	}
+	for childType := range existingChildren {
+		children := childrenOfType(r, childType)
+		extras.Children = append(extras.Children, children...)
+	}
+	return nil
+}
+
+// FinalName returns the final name for the resource by examining the struct tags for
+// the extension of the Resource's type. If no such extension is registered or the
+// extension struct does not have a name field, an error is returned.
+func (r *Resource) FinalName() (string, error) {
+	extensionsMu.RLock()
+	defer extensionsMu.RUnlock()
+	t, ok := extensions[r.Type]
+	if !ok {
+		return "", fmt.Errorf("no extension registered for %q", r.Type)
+	}
+	for _, fd := range specFields(t) {
+		if fd.isName() {
+			if fd.tag != "" {
+				name, ok := r.Attr(fd.tag)
+				if ok {
+					return name.String()
+				}
+			}
+			return r.Name, nil
+		}
+	}
+	return "", fmt.Errorf("extension %q has no name field", r.Type)
+}
+
+func validateStructPtr(target any) error {
+	typeOf := reflect.TypeOf(target)
+	if typeOf.Kind() != reflect.Ptr {
+		return errors.New("schemahcl: expected target to be a pointer")
+	}
+	if typeOf.Elem().Kind() != reflect.Struct {
+		return errors.New("schemahcl: expected target to be a pointer to a struct")
+	}
+	return nil
+}
+
+func existingElements(r *Resource) (attrs, children map[string]struct{}) {
+	attrs, children = make(map[string]struct{}), make(map[string]struct{})
+	for _, ea := range r.Attrs {
+		attrs[ea.K] = struct{}{}
+	}
+	for _, ec := range r.Children {
+		children[ec.Type] = struct{}{}
+	}
+	return
+}
+
+func setChildSlice(field reflect.Value, children []*Resource) error {
+	if field.Type().Kind() != reflect.Slice {
+		return fmt.Errorf("schemahcl: expected field to be of kind slice")
+	}
+	if len(children) == 0 {
+		return nil
+	}
+	typ := field.Type().Elem()
+	slc := reflect.MakeSlice(reflect.SliceOf(typ), 0, len(children))
+	for _, c := range children {
+		n := reflect.New(typ.Elem())
+		ext := n.Interface()
+		if err := c.as(ext); err != nil {
+			return err
+		}
+		slc = reflect.Append(slc, reflect.ValueOf(ext))
+	}
+	field.Set(slc)
+	return nil
+}
+
+func setField(field reflect.Value, attr *Attr) error {
+	switch field.Kind() {
+	case reflect.Slice:
+		return setSliceAttr(field, attr)
+	case
reflect.String: + s, err := attr.String() + if err != nil { + return fmt.Errorf("value of attr %q cannot be read as string: %w", attr.K, err) + } + field.SetString(s) + case reflect.Int, reflect.Int64: + i, err := attr.Int() + if err != nil { + return fmt.Errorf("value of attr %q cannot be read as integer: %w", attr.K, err) + } + field.SetInt(int64(i)) + case reflect.Bool: + b, err := attr.Bool() + if err != nil { + return fmt.Errorf("value of attr %q cannot be read as bool: %w", attr.K, err) + } + field.SetBool(b) + case reflect.Ptr: + if err := setPtr(field, attr.V); err != nil { + return fmt.Errorf("set field %q: %w", attr.K, err) + } + case reflect.Interface: + field.Set(reflect.ValueOf(attr.V)) + default: + if err := gocty.FromCtyValue(attr.V, field.Addr().Interface()); err != nil { + return fmt.Errorf("set field %q of type %T: %w", attr.K, field, err) + } + } + return nil +} + +func setPtr(field reflect.Value, cv cty.Value) error { + rt := reflect.TypeOf(cv) + if field.Type() == rt { + field.Set(reflect.ValueOf(cv)) + return nil + } + // If we are setting a Type field handle RawExpr and Ref specifically. + if _, ok := field.Interface().(*Type); ok { + if !cv.Type().IsCapsuleType() { + return fmt.Errorf("unexpected type %s", cv.Type().FriendlyName()) + } + switch t := cv.EncapsulatedValue().(type) { + case *RawExpr: + field.Set(reflect.ValueOf(&Type{T: t.X})) + return nil + case *Ref: + field.Set(reflect.ValueOf(&Type{ + T: t.V, + IsRef: true, + })) + return nil + } + } + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + switch e := field.Interface().(type) { + case *Ref: + switch { + case cv.Type() == cty.String: + e.V = cv.AsString() + case cv.Type().IsCapsuleType(): + ref, ok := cv.EncapsulatedValue().(*Ref) + if !ok { + return fmt.Errorf("schemahcl: expected value to be a *Ref, got: %T", cv.EncapsulatedValue()) + } + e.V = ref.V + } + default: + if err := gocty.FromCtyValue(cv, e); err != nil { + return fmt.Errorf("converting cty.Value to %T: %w", e, err) + } + } + return nil +} + +// setSliceAttr sets the value of attr to the slice field. This function expects both the target field +// and the source attr to be slices. 
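+//
+// For example (an illustrative sketch), an HCL attribute such as
+//
+//	columns = [column.id, column.name]
+//
+// is decoded into a []*Ref field as []*Ref{{V: "$column.id"}, {V: "$column.name"}}
+// (the exact addresses depend on where the referenced blocks are defined).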
+func setSliceAttr(field reflect.Value, attr *Attr) error { + if !attr.V.Type().IsListType() && !attr.V.Type().IsTupleType() { + return fmt.Errorf("schemahcl: field is of type slice but attr %q is type: %s", attr.K, attr.V.Type().FriendlyName()) + } + typ := field.Type().Elem() + slc := reflect.MakeSlice(reflect.SliceOf(typ), 0, attr.V.LengthInt()) + switch typ.Kind() { + case reflect.String: + s, err := attr.Strings() + if err != nil { + return fmt.Errorf("cannot read attribute %q of type %q as string list: %w", attr.K, attr.V.Type().FriendlyName(), err) + } + for _, item := range s { + slc = reflect.Append(slc, reflect.ValueOf(item)) + } + case reflect.Bool: + bools, err := attr.Bools() + if err != nil { + return fmt.Errorf("cannot read attribute %q as bool list: %w", attr.K, err) + } + for _, item := range bools { + slc = reflect.Append(slc, reflect.ValueOf(item)) + } + case reflect.Ptr: + if typ != reflect.TypeOf(&Ref{}) { + return fmt.Errorf("only pointers to refs supported, got %s", typ) + } + for _, v := range attr.V.AsValueSlice() { + switch { + case v.Type().IsCapsuleType(): + slc = reflect.Append(slc, reflect.ValueOf(v.EncapsulatedValue().(*Ref))) + case isRef(v): + slc = reflect.Append(slc, reflect.ValueOf(&Ref{V: v.GetAttr("__ref").AsString()})) + default: + return fmt.Errorf("schemahcl: unsupported type %s in slice", v.Type().FriendlyName()) + } + } + default: + return fmt.Errorf("slice of unsupported kind: %q", typ.Kind()) + } + field.Set(slc) + return nil +} + +// Scan reads the Extension into the Resource. Scan will override the Resource +// name or type if they are set for the extension. +func (r *Resource) Scan(ext any) error { + if lookup, ok := extensions.lookup(ext); ok { + r.Type = lookup + } + v := indirect(reflect.ValueOf(ext)) + for _, ft := range specFields(ext) { + field := v.FieldByName(ft.Name) + switch { + case ft.omitempty() && isEmpty(field): + case ft.isName(): + if field.Kind() != reflect.String { + return errors.New("schemahcl: extension name field must be string") + } + r.Name = field.String() + case ft.isQualifier(): + if field.Kind() != reflect.String { + return errors.New("schemahcl: extension qualifier field must be string") + } + r.Qualifier = field.String() + case isResourceSlice(field.Type()): + for i := 0; i < field.Len(); i++ { + ext := field.Index(i).Interface() + child := &Resource{} + if err := child.Scan(ext); err != nil { + return err + } + child.Type = ft.tag + r.Children = append(r.Children, child) + } + case isSingleResource(field.Type()): + if k := field.Kind(); k == reflect.Struct && field.IsZero() || k == reflect.Pointer && field.IsNil() { + continue + } + ext := field.Interface() + child := &Resource{} + if err := child.Scan(ext); err != nil { + return err + } + child.Type = ft.tag + r.Children = append(r.Children, child) + case field.Kind() == reflect.Ptr: + if field.IsNil() { + continue + } + if err := scanPtr(ft.tag, r, field); err != nil { + return err + } + default: + if err := scanAttr(ft.tag, r, field); err != nil { + return err + } + } + } + rem, ok := ext.(Remainer) + if !ok { + return nil + } + extra := rem.Remain() + for _, attr := range extra.Attrs { + r.SetAttr(attr) + } + r.Children = append(r.Children, extra.Children...) 
+	return nil
+}
+
+func scanPtr(key string, r *Resource, field reflect.Value) error {
+	attr := &Attr{K: key}
+	switch e := field.Interface().(type) {
+	case *Ref:
+		attr.V = cty.CapsuleVal(ctyRefType, e)
+	case *Type:
+		attr.V = cty.CapsuleVal(ctyTypeSpec, e)
+	default:
+		t, err := gocty.ImpliedType(e)
+		if err != nil {
+			return fmt.Errorf("schemahcl: cannot infer type for field %q when scanning pointer: %w", key, err)
+		}
+		attr.V, err = gocty.ToCtyValue(e, t)
+		if err != nil {
+			return fmt.Errorf("schemahcl: cannot convert value for field %q: %w", key, err)
+		}
+	}
+	r.SetAttr(attr)
+	return nil
+}
+
+func scanAttr(key string, r *Resource, field reflect.Value) error {
+	switch k := field.Kind(); {
+	case k == reflect.Interface:
+		if field.IsNil() {
+			break
+		}
+		i := field.Interface()
+		v, ok := i.(cty.Value)
+		if !ok {
+			return fmt.Errorf("schemahcl: unsupported interface type %T for field %q", i, key)
+		}
+		r.SetAttr(&Attr{K: key, V: v})
+	case field.Type() == reflect.TypeOf([]*Ref{}):
+		if field.Len() > 0 {
+			r.SetAttr(RefsAttr(key, field.Interface().([]*Ref)...))
+		}
+	case k == reflect.Int, k == reflect.Int64:
+		r.SetAttr(Int64Attr(key, field.Int()))
+	case k == reflect.Struct:
+		if v, ok := field.Interface().(cty.Value); ok && v.IsNull() {
+			break
+		}
+		fallthrough
+	default:
+		t, err := gocty.ImpliedType(field.Interface())
+		if err != nil {
+			return fmt.Errorf("schemahcl: cannot infer type for field %q when scanning attribute: %w", key, err)
+		}
+		v, err := gocty.ToCtyValue(field.Interface(), t)
+		if err != nil {
+			return fmt.Errorf("schemahcl: cannot convert value for field %q: %w", key, err)
+		}
+		r.SetAttr(&Attr{
+			K: key,
+			V: v,
+		})
+	}
+	return nil
+}
+
+// specFields uses reflection to find struct fields that are tagged with "spec"
+// and returns a list of mappings from the tag to the field name.
+func specFields(ext any) []fieldDesc {
+	t := indirect(reflect.TypeOf(ext))
+	var fields []fieldDesc
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		tag, ok := f.Tag.Lookup("spec")
+		if !ok {
+			continue
+		}
+		d := fieldDesc{tag: tag, StructField: f}
+		if idx := strings.IndexByte(tag, ','); idx != -1 {
+			d.tag, d.options = tag[:idx], tag[idx+1:]
+		}
+		fields = append(fields, d)
+	}
+	return fields
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	}
+	return false
+}
+
+type fieldDesc struct {
+	tag     string // tag name.
+	options string // rest of the options.
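+	// For example (an illustrative sketch), a field declared as
+	//
+	//	Name string `spec:",name"`
+	//
+	// parses into tag "" with options "name", while `spec:"size"` parses
+	// into tag "size" with no options.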
+ reflect.StructField +} + +func (f fieldDesc) isName() bool { return f.is("name") } + +func (f fieldDesc) isQualifier() bool { return f.is("qualifier") } + +func (f fieldDesc) omitempty() bool { return f.is("omitempty") } + +func (f fieldDesc) is(t string) bool { + for _, opt := range strings.Split(f.options, ",") { + if opt == t { + return true + } + } + return false +} + +func (f fieldDesc) isInterfaceSlice() bool { + return f.Type.Kind() == reflect.Slice && f.Type.Elem().Kind() == reflect.Interface +} + +func (f fieldDesc) isInterface() bool { + return f.Type.Kind() == reflect.Interface +} + +func childrenOfType(r *Resource, types ...string) []*Resource { + var out []*Resource + for _, c := range r.Children { + for _, typ := range types { + if c.Type == typ { + out = append(out, c) + } + } + } + return out +} + +func isSingleResource(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return false + } + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if _, ok := f.Tag.Lookup("spec"); ok { + return true + } + if f.Type == reflect.TypeOf(DefaultExtension{}) { + return true + } + } + return false +} + +func isResourceSlice(t reflect.Type) bool { + if t.Kind() != reflect.Slice { + return false + } + return isSingleResource(t.Elem()) +} + +func hasAttr(r *Resource, name string) bool { + _, ok := r.Attr(name) + return ok +} + +type rtype[T any] interface { + Elem() T + Kind() reflect.Kind +} + +// indirect returns the type at the end of indirection. +func indirect[T rtype[T]](t T) T { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} diff --git a/vendor/ariga.io/atlas/schemahcl/hcl.go b/vendor/ariga.io/atlas/schemahcl/hcl.go new file mode 100644 index 00000000..66595428 --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/hcl.go @@ -0,0 +1,685 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schemahcl + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// Marshal returns the Atlas HCL encoding of v. +var Marshal = MarshalerFunc(New().MarshalSpec) + +type ( + // State is used to evaluate and marshal Atlas HCL documents and stores a configuration for these operations. + State struct { + config *Config + } + // Evaluator is the interface that wraps the Eval function. + Evaluator interface { + // Eval evaluates parsed HCL files using input variables into a schema.Realm. + Eval(*hclparse.Parser, any, map[string]cty.Value) error + } + // EvalFunc is an adapter that allows the use of an ordinary function as an Evaluator. + EvalFunc func(*hclparse.Parser, any, map[string]cty.Value) error + // Marshaler is the interface that wraps the MarshalSpec function. + Marshaler interface { + // MarshalSpec marshals the provided input into a valid Atlas HCL document. + MarshalSpec(any) ([]byte, error) + } + // MarshalerFunc is the function type that is implemented by the MarshalSpec + // method of the Marshaler interface. + MarshalerFunc func(any) ([]byte, error) +) + +// MarshalSpec implements Marshaler for Atlas HCL documents. 
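+//
+// Usage sketch (MyTable is a hypothetical registered extension):
+//
+//	b, err := schemahcl.New().MarshalSpec(&MyTable{Name: "users"})
+//
+// The package-level Marshal variable provides the same behavior with a
+// default State.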
+func (s *State) MarshalSpec(v any) ([]byte, error) { + r := &Resource{} + if err := r.Scan(v); err != nil { + return nil, fmt.Errorf("schemahcl: failed scanning %T to resource: %w", v, err) + } + return s.encode(r) +} + +// EvalFiles evaluates the files in the provided paths using the input variables and +// populates v with the result. +func (s *State) EvalFiles(paths []string, v any, input map[string]cty.Value) error { + parser := hclparse.NewParser() + for _, path := range paths { + if _, diag := parser.ParseHCLFile(path); diag.HasErrors() { + return diag + } + } + return s.Eval(parser, v, input) +} + +// Eval evaluates the parsed HCL documents using the input variables and populates v +// using the result. +func (s *State) Eval(parsed *hclparse.Parser, v any, input map[string]cty.Value) error { + ctx := s.config.newCtx() + reg := &blockDef{ + fields: make(map[string]struct{}), + children: make(map[string]*blockDef), + } + files := parsed.Files() + fileNames := make([]string, 0, len(files)) + allBlocks := make([]*hclsyntax.Block, 0, len(files)) + for name, file := range files { + fileNames = append(fileNames, name) + if err := s.setInputVals(ctx, file.Body, input); err != nil { + return err + } + body := file.Body.(*hclsyntax.Body) + if err := s.evalReferences(ctx, body); err != nil { + return err + } + blocks := make(hclsyntax.Blocks, 0, len(body.Blocks)) + for _, b := range body.Blocks { + switch { + // Variable blocks are not reachable by reference. + case b.Type == varBlock: + continue + // Semi-evaluate blocks with the for_each meta argument. + case b.Body != nil && b.Body.Attributes[forEachAttr] != nil: + nb, err := forEachBlocks(ctx, b) + if err != nil { + return err + } + blocks = append(blocks, nb...) + default: + blocks = append(blocks, b) + } + reg.child(extractDef(b, reg)) + } + body.Blocks = blocks + allBlocks = append(allBlocks, blocks...) + } + vars, err := blockVars(allBlocks, "", reg) + if err != nil { + return err + } + if ctx.Variables == nil { + ctx.Variables = make(map[string]cty.Value) + } + for k, v := range vars { + ctx.Variables[k] = v + } + spec := &Resource{} + sort.Slice(fileNames, func(i, j int) bool { + return fileNames[i] < fileNames[j] + }) + for _, fn := range fileNames { + file := files[fn] + r, err := s.resource(ctx, file) + if err != nil { + return err + } + spec.Children = append(spec.Children, r.Children...) + spec.Attrs = append(spec.Attrs, r.Attrs...) + } + if err := patchRefs(spec); err != nil { + return err + } + if err := spec.As(v); err != nil { + return fmt.Errorf("schemahcl: failed reading spec as %T: %w", v, err) + } + return nil +} + +// EvalBytes evaluates the data byte-slice as an Atlas HCL document using the input variables +// and stores the result in v. +func (s *State) EvalBytes(data []byte, v any, input map[string]cty.Value) error { + parser := hclparse.NewParser() + if _, diag := parser.ParseHCL(data, ""); diag.HasErrors() { + return diag + } + return s.Eval(parser, v, input) +} + +// addrRef maps addresses to their referenced resource. +type addrRef map[string]*Resource + +// patchRefs recursively searches for schemahcl.Ref under the provided schemahcl.Resource +// and patches any variables with their concrete names. 
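+//
+// For example (an illustrative sketch), a root resource holding table "users"
+// with column "id" is loaded into an address map of roughly this shape:
+//
+//	"$table.users"            -> <table resource>
+//	"$table.users.$column.id" -> <column resource>
+//
+// against which each Ref value is resolved and rewritten.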
+func patchRefs(spec *Resource) error { + return make(addrRef).patch(spec) +} + +func (r addrRef) patch(resource *Resource) error { + cp := r.copy().load(resource, "") + for _, attr := range resource.Attrs { + if !attr.IsRef() { + continue + } + ref := attr.V.EncapsulatedValue().(*Ref) + referenced, ok := cp[ref.V] + if !ok { + return fmt.Errorf("broken reference to %q", ref.V) + } + if name, err := referenced.FinalName(); err == nil { + ref.V = strings.ReplaceAll(ref.V, referenced.Name, name) + } + } + for _, ch := range resource.Children { + if err := cp.patch(ch); err != nil { + return err + } + } + return nil +} + +func (r addrRef) copy() addrRef { + n := make(addrRef) + for k, v := range r { + n[k] = v + } + return n +} + +// load the references from the children of the resource. +func (r addrRef) load(res *Resource, track string) addrRef { + unlabeled := 0 + for _, ch := range res.Children { + current := addr("", ch.Type, ch.Name, ch.Qualifier) + if ch.Name == "" { + current += "." + strconv.Itoa(unlabeled) + unlabeled++ + } + if track != "" { + current = track + "." + current + } + r[current] = ch + r.load(ch, current) + } + return r +} + +// resource converts the hcl file to a schemahcl.Resource. +func (s *State) resource(ctx *hcl.EvalContext, file *hcl.File) (*Resource, error) { + body, ok := file.Body.(*hclsyntax.Body) + if !ok { + return nil, fmt.Errorf("schemahcl: expected remainder to be of type *hclsyntax.Body") + } + attrs, err := s.toAttrs(ctx, body.Attributes, nil) + if err != nil { + return nil, err + } + res := &Resource{ + Attrs: attrs, + } + for _, blk := range body.Blocks { + // variable blocks may be included in the document but are skipped in unmarshaling. + if blk.Type == varBlock { + continue + } + ctx, err := setBlockVars(ctx.NewChild(), blk.Body) + if err != nil { + return nil, err + } + resource, err := s.toResource(ctx, blk, []string{blk.Type}) + if err != nil { + return nil, err + } + res.Children = append(res.Children, resource) + } + return res, nil +} + +// mayScopeContext returns a new limited context for the given scope with access only +// to variables defined by WithScopedEnums and WithTypes and references in the document. +func (s *State) mayScopeContext(ctx *hcl.EvalContext, scope []string) *hcl.EvalContext { + path := strings.Join(scope, ".") + vars, ok1 := s.config.pathVars[path] + funcs, ok2 := s.config.pathFuncs[path] + if !ok1 && !ok2 { + return ctx + } + nctx := &hcl.EvalContext{ + Variables: make(map[string]cty.Value), + Functions: make(map[string]function.Function), + } + for n, v := range vars { + nctx.Variables[n] = v + } + for n, f := range funcs { + nctx.Functions[n] = f + } + // A patch from the past. Should be moved + // to specific scopes in the future. 
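+	// The "sql" function wraps its argument in a RawExpr capsule, so an
+	// attribute such as `default = sql("now()")` (an illustrative sketch)
+	// survives evaluation as the raw expression "now()".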
+ nctx.Functions["sql"] = rawExprImpl() + for p := ctx; p != nil; p = p.Parent() { + for k, v := range p.Variables { + if isRef(v) { + nctx.Variables[k] = v + } + } + } + return nctx +} + +func (s *State) toAttrs(ctx *hcl.EvalContext, hclAttrs hclsyntax.Attributes, scope []string) ([]*Attr, error) { + var attrs []*Attr + for _, hclAttr := range hclAttrs { + scope := append(scope, hclAttr.Name) + value, diag := hclAttr.Expr.Value(s.mayScopeContext(ctx, scope)) + if diag.HasErrors() { + return nil, s.typeError(diag, scope) + } + at := &Attr{K: hclAttr.Name} + switch t := value.Type(); { + case isRef(value): + at.V = cty.CapsuleVal(ctyRefType, &Ref{V: value.GetAttr("__ref").AsString()}) + case (t.IsTupleType() || t.IsListType() || t.IsSetType()) && value.LengthInt() > 0: + values := make([]cty.Value, 0, value.LengthInt()) + for it := value.ElementIterator(); it.Next(); { + _, v := it.Element() + if isRef(v) { + v = cty.CapsuleVal(ctyRefType, &Ref{V: v.GetAttr("__ref").AsString()}) + } + values = append(values, v) + } + at.V = cty.ListVal(values) + default: + at.V = value + } + attrs = append(attrs, at) + } + // hclsyntax.Attrs is an alias for map[string]*Attribute + sort.Slice(attrs, func(i, j int) bool { + return attrs[i].K < attrs[j].K + }) + return attrs, nil +} + +// typeError improves diagnostic reporting in case of parse error. +func (s *State) typeError(diag hcl.Diagnostics, scope []string) error { + path := strings.Join(scope, ".") + for _, d := range diag { + switch e := d.Expression.(type) { + case *hclsyntax.FunctionCallExpr: + if d.Summary != "Call to unknown function" { + continue + } + if t, ok := s.findTypeSpec(e.Name); ok && len(t.Attributes) == 0 { + d.Detail = fmt.Sprintf("Type %q does not accept attributes", t.Name) + } + case *hclsyntax.ScopeTraversalExpr: + if d.Summary != "Unknown variable" { + continue + } + if t, ok := s.findTypeSpec(e.Traversal.RootName()); ok && len(t.Attributes) > 0 { + d.Detail = fmt.Sprintf("Type %q requires at least 1 argument", t.Name) + } else if n := len(scope); n > 1 && (s.config.pathVars[path] != nil || s.config.pathFuncs[path] != nil) { + d.Summary = strings.Replace(d.Summary, "variable", fmt.Sprintf("%s.%s", scope[n-2], scope[n-1]), 1) + d.Detail = strings.Replace(d.Detail, "variable", scope[n-1], 1) + } + } + } + return diag +} + +func isRef(v cty.Value) bool { + t := v.Type() + if !t.IsObjectType() { + return false + } + if t.HasAttribute("__ref") { + return true + } + it := v.ElementIterator() + for it.Next() { + if _, v := it.Element(); isRef(v) { + return true + } + } + return false +} + +func (s *State) toResource(ctx *hcl.EvalContext, block *hclsyntax.Block, scope []string) (*Resource, error) { + spec := &Resource{ + Type: block.Type, + } + switch len(block.Labels) { + case 0: + case 1: + spec.Name = block.Labels[0] + case 2: + spec.Qualifier = block.Labels[0] + spec.Name = block.Labels[1] + default: + return nil, fmt.Errorf("too many labels for block: %s", block.Labels) + } + ctx = s.mayScopeContext(ctx, scope) + attrs, err := s.toAttrs(ctx, block.Body.Attributes, scope) + if err != nil { + return nil, err + } + spec.Attrs = attrs + for _, blk := range block.Body.Blocks { + r, err := s.toResource(ctx, blk, append(scope, blk.Type)) + if err != nil { + return nil, err + } + spec.Children = append(spec.Children, r) + } + return spec, nil +} + +// encode the given *schemahcl.Resource into a byte slice containing an Atlas HCL +// document representing it. 
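+//
+// For example (an illustrative sketch), a resource equivalent to
+//
+//	&Resource{Type: "table", Name: "users", Attrs: []*Attr{StringAttr("charset", "utf8")}}
+//
+// is rendered as:
+//
+//	table "users" {
+//	  charset = "utf8"
+//	}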
+func (s *State) encode(r *Resource) ([]byte, error) { + f := hclwrite.NewFile() + body := f.Body() + // If the resource has a Type then it is rendered as an HCL block. + if r.Type != "" { + blk := body.AppendNewBlock(r.Type, labels(r)) + body = blk.Body() + } + for _, attr := range r.Attrs { + if err := s.writeAttr(attr, body); err != nil { + return nil, err + } + } + for _, res := range r.Children { + if err := s.writeResource(res, body); err != nil { + return nil, err + } + } + var buf bytes.Buffer + _, err := f.WriteTo(&buf) + return buf.Bytes(), err +} + +func (s *State) writeResource(b *Resource, body *hclwrite.Body) error { + blk := body.AppendNewBlock(b.Type, labels(b)) + nb := blk.Body() + for _, attr := range b.Attrs { + if err := s.writeAttr(attr, nb); err != nil { + return err + } + } + for _, b := range b.Children { + if err := s.writeResource(b, nb); err != nil { + return err + } + } + return nil +} + +func labels(r *Resource) []string { + var l []string + if r.Qualifier != "" { + l = append(l, r.Qualifier) + } + if r.Name != "" { + l = append(l, r.Name) + } + return l +} + +func (s *State) writeAttr(attr *Attr, body *hclwrite.Body) error { + switch { + case attr.IsRef(): + v, err := attr.Ref() + if err != nil { + return err + } + ts, err := hclRefTokens(v) + if err != nil { + return err + } + body.SetAttributeRaw(attr.K, ts) + case attr.IsType(): + t, err := attr.Type() + if err != nil { + return err + } + if t.IsRef { + ts, err := hclRefTokens(t.T) + if err != nil { + return err + } + body.SetAttributeRaw(attr.K, ts) + break + } + spec, ok := s.findTypeSpec(t.T) + if !ok { + v := fmt.Sprintf("sql(%q)", t.T) + body.SetAttributeRaw(attr.K, hclRawTokens(v)) + break + } + st, err := hclType(spec, t) + if err != nil { + return err + } + body.SetAttributeRaw(attr.K, hclRawTokens(st)) + case attr.IsRawExpr(): + v, err := attr.RawExpr() + if err != nil { + return err + } + // TODO(rotemtam): the func name should be decided on contextual basis. + fnc := fmt.Sprintf("sql(%q)", v.X) + body.SetAttributeRaw(attr.K, hclRawTokens(fnc)) + case attr.V.Type().IsListType(): + // Skip scanning nil slices ([]T(nil)) by default. Users that + // want to print empty lists, should use make([]T, 0) instead. + if attr.V.LengthInt() == 0 { + return nil + } + tokens := make([]hclwrite.Tokens, 0, attr.V.LengthInt()) + for _, v := range attr.V.AsValueSlice() { + if v.Type().IsCapsuleType() { + ref, ok := v.EncapsulatedValue().(*Ref) + if !ok { + return fmt.Errorf("unsupported capsule type: %v", v.Type()) + } + ts, err := hclRefTokens(ref.V) + if err != nil { + return err + } + tokens = append(tokens, ts) + } else { + tokens = append(tokens, hclwrite.TokensForValue(v)) + } + } + body.SetAttributeRaw(attr.K, hclList(tokens)) + default: + body.SetAttributeValue(attr.K, attr.V) + } + return nil +} + +func (s *State) findTypeSpec(t string) (*TypeSpec, bool) { + for _, v := range s.config.types { + if v.T == t { + return v, true + } + } + return nil, false +} + +func hclType(spec *TypeSpec, typ *Type) (string, error) { + if spec.Format != nil { + return spec.Format(typ) + } + if len(typeFuncArgs(spec)) == 0 { + return spec.Name, nil + } + args := make([]string, 0, len(spec.Attributes)) + for _, param := range typeFuncArgs(spec) { + arg, ok := findAttr(typ.Attrs, param.Name) + if !ok { + continue + } + args = append(args, valueArgs(param, arg.V)...) + } + // If no args were chosen and the type can be described without a function. 
+ if len(args) == 0 && len(typeFuncReqArgs(spec)) == 0 { + return spec.Name, nil + } + return fmt.Sprintf("%s(%s)", spec.Name, strings.Join(args, ",")), nil +} + +func valueArgs(spec *TypeAttr, v cty.Value) []string { + switch { + case v.Type().IsListType(), v.Type().IsTupleType(), v.Type().IsSetType(), v.Type().IsCollectionType(): + args := make([]string, 0, v.LengthInt()) + for _, v := range v.AsValueSlice() { + args = append(args, valueArgs(spec, v)...) + } + return args + case v.Type() == cty.String: + return []string{strconv.Quote(v.AsString())} + case v.Type() == cty.Number && spec.Kind == reflect.Int: + iv, _ := v.AsBigFloat().Int64() + return []string{strconv.FormatInt(iv, 10)} + case v.Type() == cty.Number: + fv, _ := v.AsBigFloat().Float64() + return []string{strconv.FormatFloat(fv, 'f', -1, 64)} + case v.Type() == cty.Bool: + return []string{strconv.FormatBool(v.True())} + } + return nil +} + +func findAttr(attrs []*Attr, k string) (*Attr, bool) { + for _, attr := range attrs { + if attr.K == k { + return attr, true + } + } + return nil, false +} + +func hclRefTokens(v string) (t hclwrite.Tokens, err error) { + // If it is a reference to a type or an enum. + if !strings.HasPrefix(v, "$") { + return []*hclwrite.Token{{Type: hclsyntax.TokenIdent, Bytes: []byte(v)}}, nil + } + path, err := (&Ref{V: v}).Path() + if err != nil { + return nil, err + } + for i, p := range path { + if i > 0 { + t = append(t, &hclwrite.Token{Type: hclsyntax.TokenDot, Bytes: []byte{'.'}}) + } + t = append(t, &hclwrite.Token{Type: hclsyntax.TokenIdent, Bytes: []byte(p.T)}) + for _, v := range p.V { + switch { + case validIdent(v): + t = append(t, + &hclwrite.Token{Type: hclsyntax.TokenDot, Bytes: []byte{'.'}}, + &hclwrite.Token{Type: hclsyntax.TokenIdent, Bytes: []byte(v)}, + ) + default: + t = append(t, &hclwrite.Token{Type: hclsyntax.TokenOBrack, Bytes: []byte{'['}}) + t = append(t, hclwrite.TokensForValue(cty.StringVal(v))...) + t = append(t, &hclwrite.Token{Type: hclsyntax.TokenCBrack, Bytes: []byte{']'}}) + } + } + } + return t, nil +} + +func hclRawTokens(s string) hclwrite.Tokens { + return hclwrite.Tokens{ + &hclwrite.Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(s), + }, + } +} + +func hclList(items []hclwrite.Tokens) hclwrite.Tokens { + t := hclwrite.Tokens{&hclwrite.Token{ + Type: hclsyntax.TokenOBrack, + Bytes: []byte("["), + }} + for i, item := range items { + if i > 0 { + t = append(t, &hclwrite.Token{Type: hclsyntax.TokenComma, Bytes: []byte(",")}) + } + t = append(t, item...) 
+ } + t = append(t, &hclwrite.Token{ + Type: hclsyntax.TokenCBrack, + Bytes: []byte("]"), + }) + return t +} + +func forEachBlocks(ctx *hcl.EvalContext, b *hclsyntax.Block) ([]*hclsyntax.Block, error) { + forEach, diags := b.Body.Attributes[forEachAttr].Expr.Value(ctx) + if diags.HasErrors() { + return nil, diags + } + if t := forEach.Type(); !t.IsSetType() && !t.IsObjectType() { + return nil, fmt.Errorf("schemahcl: for_each does not support %s type", t.FriendlyName()) + } + delete(b.Body.Attributes, forEachAttr) + blocks := make([]*hclsyntax.Block, 0, forEach.LengthInt()) + for it := forEach.ElementIterator(); it.Next(); { + k, v := it.Element() + nctx := ctx.NewChild() + nctx.Variables = map[string]cty.Value{ + eachRef: cty.ObjectVal(map[string]cty.Value{ + "key": k, + "value": v, + }), + } + nb, err := copyBlock(nctx, b) + if err != nil { + return nil, fmt.Errorf("schemahcl: evaluate block for value %q: %w", v, err) + } + blocks = append(blocks, nb) + } + return blocks, nil +} + +func copyBlock(ctx *hcl.EvalContext, b *hclsyntax.Block) (*hclsyntax.Block, error) { + nb := &hclsyntax.Block{ + Type: b.Type, + Labels: b.Labels, + Body: &hclsyntax.Body{ + Attributes: make(map[string]*hclsyntax.Attribute), + Blocks: make([]*hclsyntax.Block, 0, len(b.Body.Blocks)), + }, + } + for k, v := range b.Body.Attributes { + x, diags := v.Expr.Value(ctx) + if diags.HasErrors() { + return nil, diags + } + nv := *v + nv.Expr = &hclsyntax.LiteralValueExpr{Val: x} + nb.Body.Attributes[k] = &nv + } + for _, v := range b.Body.Blocks { + nv, err := copyBlock(ctx, v) + if err != nil { + return nil, err + } + nb.Body.Blocks = append(nb.Body.Blocks, nv) + } + return nb, nil +} + +// Eval implements the Evaluator interface. +func (f EvalFunc) Eval(p *hclparse.Parser, i any, input map[string]cty.Value) error { + return f(p, i, input) +} diff --git a/vendor/ariga.io/atlas/schemahcl/opts.go b/vendor/ariga.io/atlas/schemahcl/opts.go new file mode 100644 index 00000000..3e1fd4bf --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/opts.go @@ -0,0 +1,261 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schemahcl + +import ( + "errors" + "fmt" + "reflect" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +type ( + // Config configures an unmarshaling. + Config struct { + types []*TypeSpec + newCtx func() *hcl.EvalContext + pathVars map[string]map[string]cty.Value + pathFuncs map[string]map[string]function.Function + datasrc, initblk map[string]func(*hcl.EvalContext, *hclsyntax.Block) (cty.Value, error) + } + // Option configures a Config. + Option func(*Config) +) + +// New returns a State configured with options. +func New(opts ...Option) *State { + cfg := &Config{ + pathVars: make(map[string]map[string]cty.Value), + pathFuncs: make(map[string]map[string]function.Function), + newCtx: func() *hcl.EvalContext { + return stdTypes(&hcl.EvalContext{ + Functions: stdFuncs(), + Variables: make(map[string]cty.Value), + }) + }, + } + for _, opt := range opts { + opt(cfg) + } + return &State{config: cfg} +} + +// WithScopedEnums configured a list of allowed ENUMs to be used in +// the given context, block or attribute. For example, the following +// option allows setting HASH or BTREE to the "using" attribute in +// "index" block. 
+// +// WithScopedEnums("table.index.type", "HASH", "BTREE") +// +// table "t" { +// ... +// index "i" { +// type = HASH // Allowed. +// type = INVALID // Not Allowed. +// } +// } +func WithScopedEnums(path string, enums ...string) Option { + return func(c *Config) { + vars := make(map[string]cty.Value, len(enums)) + for i := range enums { + vars[enums[i]] = cty.StringVal(enums[i]) + } + c.pathVars[path] = vars + } +} + +// WithVariables registers a list of variables to be injected into the context. +func WithVariables(vars map[string]cty.Value) Option { + return func(c *Config) { + c.newCtx = func() *hcl.EvalContext { + return stdTypes(&hcl.EvalContext{ + Functions: stdFuncs(), + Variables: vars, + }) + } + } +} + +// WithDataSource registers a data source name and its corresponding handler. +// e.g., the example below registers a data source named "text" that returns +// the string defined in the data source block. +// +// WithDataSource("text", func(ctx *hcl.EvalContext, b *hclsyntax.Block) (cty.Value, hcl.Diagnostics) { +// attrs, diags := b.Body.JustAttributes() +// if diags.HasErrors() { +// return cty.NilVal, diags +// } +// v, diags := attrs["value"].Expr.Value(ctx) +// if diags.HasErrors() { +// return cty.NilVal, diags +// } +// return cty.ObjectVal(map[string]cty.Value{"output": v}), nil +// }) +// +// data "text" "hello" { +// value = "hello world" +// } +func WithDataSource(name string, h func(*hcl.EvalContext, *hclsyntax.Block) (cty.Value, error)) Option { + return func(c *Config) { + if c.datasrc == nil { + c.datasrc = make(map[string]func(*hcl.EvalContext, *hclsyntax.Block) (cty.Value, error)) + } + c.datasrc[name] = h + } +} + +// WithInitBlock registers a block that evaluates (first) to a cty.Value, +// has no labels, and can be defined only once. For example: +// +// WithInitBlock("atlas", func(ctx *hcl.EvalContext, b *hclsyntax.Block) (cty.Value, hcl.Diagnostics) { +// attrs, diags := b.Body.JustAttributes() +// if diags.HasErrors() { +// return cty.NilVal, diags +// } +// v, diags := attrs["modules"].Expr.Value(ctx) +// if diags.HasErrors() { +// return cty.NilVal, diags +// } +// return cty.ObjectVal(map[string]cty.Value{"modules": v}), nil +// }) +func WithInitBlock(name string, h func(*hcl.EvalContext, *hclsyntax.Block) (cty.Value, error)) Option { + return func(c *Config) { + if c.initblk == nil { + c.initblk = make(map[string]func(*hcl.EvalContext, *hclsyntax.Block) (cty.Value, error)) + } + c.initblk[name] = h + } +} + +// WithTypes configures the given types as identifiers in the unmarshal +// context. The path controls where the usage of this type is allowed. +func WithTypes(path string, typeSpecs []*TypeSpec) Option { + vars := make(map[string]cty.Value) + funcs := make(map[string]function.Function) + for _, ts := range typeSpecs { + typeSpec := ts + // If no required args exist, register the type as a variable in the HCL context. + if len(typeFuncReqArgs(typeSpec)) == 0 { + typ := &Type{T: typeSpec.T} + vars[typeSpec.Name] = cty.CapsuleVal(ctyTypeSpec, typ) + } + // If func args exist, register the type as a function in HCL. + if len(typeFuncArgs(typeSpec)) > 0 { + funcs[typeSpec.Name] = typeFuncSpec(typeSpec) + } + } + return func(c *Config) { + c.types = append(c.types, typeSpecs...) 
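+		// As an illustrative sketch, an option constructed with
+		//
+		//	WithTypes("table.column.type", []*TypeSpec{{
+		//		Name: "varchar", T: "varchar",
+		//		Attributes: []*TypeAttr{{Name: "size", Kind: reflect.Int, Required: true}},
+		//	}})
+		//
+		// makes `varchar(255)` a valid type expression under that path.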
+ c.pathVars[path] = vars + c.pathFuncs[path] = funcs + } +} + +func rawExprImpl() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "def", Type: cty.String, AllowNull: false}, + }, + Type: function.StaticReturnType(ctyRawExpr), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + x := args[0].AsString() + if len(x) == 0 { + return cty.NilVal, errors.New("empty expression") + } + t := &RawExpr{X: x} + return cty.CapsuleVal(ctyRawExpr, t), nil + }, + }) +} + +// typeFuncSpec returns the HCL function for defining the type in the spec. +func typeFuncSpec(typeSpec *TypeSpec) function.Function { + spec := &function.Spec{ + Type: function.StaticReturnType(ctyTypeSpec), + } + for _, arg := range typeFuncArgs(typeSpec) { + if arg.Kind == reflect.Slice || !arg.Required { + spec.VarParam = &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + } + continue + } + p := function.Parameter{ + Name: arg.Name, + AllowNull: !arg.Required, + } + switch arg.Kind { + case reflect.String: + p.Type = cty.String + case reflect.Int, reflect.Float32, reflect.Int64: + p.Type = cty.Number + case reflect.Bool: + p.Type = cty.Bool + } + spec.Params = append(spec.Params, p) + } + spec.Impl = typeFuncSpecImpl(spec, typeSpec) + return function.New(spec) +} + +// typeFuncSpecImpl returns the function implementation for the HCL function spec. +func typeFuncSpecImpl(_ *function.Spec, typeSpec *TypeSpec) function.ImplFunc { + return func(args []cty.Value, retType cty.Type) (cty.Value, error) { + t := &Type{ + T: typeSpec.T, + } + if len(args) > len(typeSpec.Attributes) && typeSpec.Attributes[len(typeSpec.Attributes)-1].Kind != reflect.Slice { + return cty.NilVal, fmt.Errorf("too many arguments for type definition %q", typeSpec.Name) + } + // TypeRegistry enforces that: + // 1. Required attrs come before optionals + // 2. Slice attrs can only be last + for _, attr := range typeFuncArgs(typeSpec) { + // If the attribute is a slice, read all remaining args into a list value. + if attr.Kind == reflect.Slice { + t.Attrs = append(t.Attrs, &Attr{K: attr.Name, V: cty.ListVal(args)}) + break + } + if len(args) == 0 { + break + } + t.Attrs = append(t.Attrs, &Attr{K: attr.Name, V: args[0]}) + args = args[1:] + } + return cty.CapsuleVal(ctyTypeSpec, t), nil + } +} + +// typeFuncArgs returns the type attributes that are configured via arguments to the +// type definition, for example precision and scale in a decimal definition, i.e `decimal(10,2)`. +func typeFuncArgs(spec *TypeSpec) []*TypeAttr { + var args []*TypeAttr + for _, attr := range spec.Attributes { + // TODO(rotemtam): this should be defined on the TypeSpec. + if attr.Name == "unsigned" { + continue + } + args = append(args, attr) + } + return args +} + +// typeFuncReqArgs returns the required type attributes that are configured via arguments. +// for instance, in MySQL a field may be defined as both `int` and `int(10)`, in this case +// it is not a required parameter. +func typeFuncReqArgs(spec *TypeSpec) []*TypeAttr { + var args []*TypeAttr + for _, arg := range typeFuncArgs(spec) { + if arg.Required { + args = append(args, arg) + } + } + return args +} diff --git a/vendor/ariga.io/atlas/schemahcl/spec.go b/vendor/ariga.io/atlas/schemahcl/spec.go new file mode 100644 index 00000000..472cfe4e --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/spec.go @@ -0,0 +1,498 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package schemahcl
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+	"strings"
+
+	"ariga.io/atlas/sql/schema"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+type (
+	// Resource is a generic container for resources described in configurations.
+	Resource struct {
+		Name      string
+		Qualifier string
+		Type      string
+		Attrs     []*Attr
+		Children  []*Resource
+	}
+
+	// Attr is an attribute of a Resource.
+	Attr struct {
+		K string
+		V cty.Value
+	}
+
+	// Ref implements Value and represents a reference to another Resource.
+	// The path to a Resource under the root Resource is expressed as "$<type>.<name>..."
+	// recursively. For example, a resource of type "table" that is named "users" and is a direct
+	// child of the root Resource's address shall be "$table.users". A child resource of that table
+	// of type "column" and named "id", shall be referenced as "$table.users.$column.id", and so on.
+	Ref struct {
+		V string
+	}
+
+	// RawExpr implements Value and represents any raw expression.
+	RawExpr struct {
+		X string
+	}
+
+	// TypeSpec represents a specification for defining a Type.
+	TypeSpec struct {
+		// Name is the identifier for the type in an Atlas DDL document.
+		Name string
+
+		// T is the database identifier for the type.
+		T          string
+		Attributes []*TypeAttr
+
+		// RType is the reflect.Type of the schema.Type used to describe the TypeSpec.
+		// This field is optional and used to determine the TypeSpec in cases where the
+		// schema.Type does not have a `T` field.
+		RType reflect.Type
+
+		// Format is an optional formatting function.
+		// If it exists, it will be used instead of the registry one.
+		Format func(*Type) (string, error)
+
+		// FromSpec is an optional function that can be attached
+		// to the type spec and allows converting the schema spec
+		// type to a schema type (from document to database).
+		FromSpec func(*Type) (schema.Type, error)
+
+		// ToSpec is an optional function that can be attached
+		// to the type spec and allows converting the schema type
+		// to a schema spec type (from database to document).
+		ToSpec func(schema.Type) (*Type, error)
+	}
+
+	// TypeAttr describes an attribute of a TypeSpec, for example `varchar` fields
+	// can have a `size` attribute.
+	TypeAttr struct {
+		// Name should be the snake_case name of the related schema.Type struct field.
+		Name     string
+		Kind     reflect.Kind
+		Required bool
+	}
+
+	// Type represents the type of the field in a schema.
+	Type struct {
+		T     string
+		Attrs []*Attr
+		IsRef bool
+	}
+)
+
+// IsRef indicates if the attribute is a reference type.
+func (a *Attr) IsRef() bool {
+	if !a.V.Type().IsCapsuleType() {
+		return false
+	}
+	_, ok := a.V.EncapsulatedValue().(*Ref)
+	return ok
+}
+
+// IsRawExpr indicates if the attribute is a RawExpr type.
+func (a *Attr) IsRawExpr() bool {
+	if !a.V.Type().IsCapsuleType() {
+		return false
+	}
+	_, ok := a.V.EncapsulatedValue().(*RawExpr)
+	return ok
+}
+
+// IsType indicates if the attribute is a type spec.
+func (a *Attr) IsType() bool {
+	if !a.V.Type().IsCapsuleType() {
+		return false
+	}
+	_, ok := a.V.EncapsulatedValue().(*Type)
+	return ok
+}
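+// Usage sketch (an illustrative example): attributes are built with the
+// helper constructors defined later in this file and read back with the
+// typed accessors below. For example:
+//
+//	a := StringAttr("charset", "utf8")
+//	s, err := a.String() // "utf8", nil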
+// Int returns an int from the Value of the Attr. If the value is not a LiteralValue or the value
+// cannot be converted to an integer an error is returned.
+func (a *Attr) Int() (int, error) {
+	i, err := a.Int64()
+	if err != nil {
+		return 0, err
+	}
+	return int(i), nil
+}
+
+// Int64 returns an int64 from the Value of the Attr. If the value is not a LiteralValue or the value
+// cannot be converted to an integer an error is returned.
+func (a *Attr) Int64() (i int64, err error) {
+	if err = gocty.FromCtyValue(a.V, &i); err != nil {
+		return 0, err
+	}
+	return i, nil
+}
+
+// String returns a string from the Value of the Attr. If the value is not a LiteralValue
+// an error is returned. String values are expected to be quoted. If the value is not
+// properly quoted an error is returned.
+func (a *Attr) String() (s string, err error) {
+	if err = gocty.FromCtyValue(a.V, &s); err != nil {
+		return "", err
+	}
+	return s, nil
+}
+
+// Bool returns a boolean from the Value of the Attr. If the value is not a LiteralValue or the value
+// cannot be converted to a boolean an error is returned.
+func (a *Attr) Bool() (b bool, err error) {
+	if err = gocty.FromCtyValue(a.V, &b); err != nil {
+		return false, err
+	}
+	return b, nil
+}
+
+// Ref extracts the reference from the Value of the Attr.
+func (a *Attr) Ref() (string, error) {
+	ref, ok := a.V.EncapsulatedValue().(*Ref)
+	if !ok {
+		return "", fmt.Errorf("schema: cannot read attribute %q as ref", a.K)
+	}
+	return ref.V, nil
+}
+
+// Type extracts the Type from the Attr.
+func (a *Attr) Type() (*Type, error) {
+	t, ok := a.V.EncapsulatedValue().(*Type)
+	if !ok {
+		return nil, fmt.Errorf("schema: cannot read attribute %q as type", a.K)
+	}
+	return t, nil
+}
+
+// RawExpr extracts the RawExpr from the Attr.
+func (a *Attr) RawExpr() (*RawExpr, error) {
+	if !a.IsRawExpr() {
+		return nil, fmt.Errorf("schema: cannot read attribute %q as raw expression", a.K)
+	}
+	return a.V.EncapsulatedValue().(*RawExpr), nil
+}
+
+// Refs returns a slice of references.
+func (a *Attr) Refs() ([]*Ref, error) {
+	refs := make([]*Ref, 0, len(a.V.AsValueSlice()))
+	for _, v := range a.V.AsValueSlice() {
+		ref, ok := v.EncapsulatedValue().(*Ref)
+		if !ok {
+			return nil, fmt.Errorf("schema: cannot read attribute %q as ref", a.K)
+		}
+		refs = append(refs, ref)
+	}
+	return refs, nil
+}
+
+// Strings returns a slice of strings from the Value of the Attr. If the value is not a ListValue or its
+// values cannot be converted to strings an error is returned.
+func (a *Attr) Strings() (vs []string, err error) {
+	if a.V.Type().IsTupleType() {
+		for _, v := range a.V.AsValueSlice() {
+			var s string
+			if err = gocty.FromCtyValue(v, &s); err != nil {
+				return nil, err
+			}
+			vs = append(vs, s)
+		}
+		return vs, nil
+	}
+	if err = gocty.FromCtyValue(a.V, &vs); err != nil {
+		return nil, err
+	}
+	return vs, nil
+}
+
+// PathIndex represents an index in a reference path.
+type PathIndex struct {
+	T string   // type
+	V []string // identifiers
+}
+
+// Check if the path index is valid.
+func (p *PathIndex) Check() error {
+	if p.T == "" || len(p.V) == 0 {
+		return fmt.Errorf("schemahcl: missing type or identifier %v", p)
+	}
+	for _, v := range p.V {
+		if v == "" {
+			return fmt.Errorf("schemahcl: empty identifier %v", p)
+		}
+	}
+	return nil
+}
+
+// ByType returns the path index for the given type.
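+//
+// For example (an illustrative sketch), given a reference "$table.users.$column.id",
+// ByType("column") returns ["id"] and ByType("table") returns ["users"].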
+func (r *Ref) ByType(name string) ([]string, error) { + if r == nil { + return nil, fmt.Errorf("schemahcl: type %q was not found in nil reference", name) + } + path, err := r.Path() + if err != nil { + return nil, err + } + var vs []string + for _, p := range path { + switch { + case p.T != name: + case vs != nil: + return nil, fmt.Errorf("schemahcl: multiple %q found in reference", name) + default: + if err := p.Check(); err != nil { + return nil, err + } + vs = p.V + } + } + if vs == nil { + return nil, fmt.Errorf("schemahcl: missing %q in reference", name) + } + return vs, nil +} + +// Path returns a parsed path including block types and their identifiers. +func (r *Ref) Path() (path []PathIndex, err error) { + for i := 0; i < len(r.V); i++ { + var part PathIndex + switch idx := strings.IndexAny(r.V[i:], ".["); { + case r.V[i] != '$': + return nil, fmt.Errorf("schemahcl: missing type in reference %q", r.V[i:]) + case idx == -1: + return nil, fmt.Errorf("schemahcl: missing identifier in reference %q", r.V[i:]) + default: + part.T = r.V[i+1 : i+idx] + i += idx + } + Ident: + for i < len(r.V) { + switch { + // End of identifier before a type. + case strings.HasPrefix(r.V[i:], ".$"): + break Ident + // Scan identifier. + case r.V[i] == '.': + v := r.V[i+1:] + if idx := strings.IndexAny(v, ".["); idx != -1 { + v = v[:idx] + } + part.V = append(part.V, v) + i += 1 + len(v) + // Scan attribute (["..."]). + case strings.HasPrefix(r.V[i:], "[\""): + idx := scanString(r.V[i+2:]) + if idx == -1 { + return nil, fmt.Errorf("schemahcl: unterminated string in reference %q", r.V[i:]) + } + v := r.V[i+2 : i+2+idx] + i += 2 + idx + if !strings.HasPrefix(r.V[i:], "\"]") { + return nil, fmt.Errorf("schemahcl: missing ']' in reference %q", r.V[i:]) + } + part.V = append(part.V, v) + i += 2 + default: + return nil, fmt.Errorf("schemahcl: invalid character in reference %q", r.V[i:]) + } + } + if err := part.Check(); err != nil { + return nil, err + } + path = append(path, part) + } + return +} + +// BuildRef from a path. +func BuildRef(path []PathIndex) *Ref { + var v string + for _, p := range path { + switch { + case len(p.V) == 1: + v = addr(v, p.T, p.V[0], "") + case len(p.V) == 2: + v = addr(v, p.T, p.V[1], p.V[0]) + default: + v = addr(v, p.T, "", "") + } + } + return &Ref{V: v} +} + +func scanString(s string) int { + for i := 0; i < len(s); i++ { + switch s[i] { + case '\\': + i++ + case '"': + return i + } + } + return -1 +} + +// Bools returns a slice of bools from the Value of the Attr. If The value is not a ListValue or its +// values cannot be converted to bools an error is returned. +func (a *Attr) Bools() (vs []bool, err error) { + if a.V.Type().IsTupleType() { + for _, v := range a.V.AsValueSlice() { + var b bool + if err = gocty.FromCtyValue(v, &b); err != nil { + return nil, err + } + vs = append(vs, b) + } + return vs, nil + } + if err = gocty.FromCtyValue(a.V, &vs); err != nil { + return nil, err + } + return vs, nil +} + +// Resource returns the first child Resource by its type and reports whether it was found. +func (r *Resource) Resource(t string) (*Resource, bool) { + if r == nil { + return nil, false + } + for i := range r.Children { + if r.Children[i].Type == t { + return r.Children[i], true + } + } + return nil, false +} + +// Attr returns the Attr by the provided name and reports whether it was found. +func (r *Resource) Attr(name string) (*Attr, bool) { + return attrVal(r.Attrs, name) +} + +// SetAttr sets the Attr on the Resource. 
If r is nil, a zero value Resource +// is initialized. If an Attr with the same key exists, it is replaced by attr. +func (r *Resource) SetAttr(attr *Attr) { + if r == nil { + *r = Resource{} + } + r.Attrs = replaceOrAppendAttr(r.Attrs, attr) +} + +// MarshalSpec implements Marshaler. +func (f MarshalerFunc) MarshalSpec(v any) ([]byte, error) { + return f(v) +} + +func attrVal(attrs []*Attr, name string) (*Attr, bool) { + for _, attr := range attrs { + if attr.K == name { + return attr, true + } + } + return nil, false +} + +func replaceOrAppendAttr(attrs []*Attr, attr *Attr) []*Attr { + for i, v := range attrs { + if v.K == attr.K { + attrs[i] = attr + return attrs + } + } + return append(attrs, attr) +} + +// Attr returns a TypeAttr by name and reports if one was found. +func (s *TypeSpec) Attr(name string) (*TypeAttr, bool) { + for _, ta := range s.Attributes { + if ta.Name == name { + return ta, true + } + } + return nil, false +} + +var _ Marshaler = MarshalerFunc(nil) + +// StringAttr is a helper method for constructing *schemahcl.Attr instances that contain string value. +func StringAttr(k string, v string) *Attr { + return &Attr{ + K: k, + V: cty.StringVal(v), + } +} + +// IntAttr is a helper method for constructing *schemahcl.Attr instances that contain int64 value. +func IntAttr(k string, v int) *Attr { + return Int64Attr(k, int64(v)) +} + +// Int64Attr is a helper method for constructing *schemahcl.Attr instances that contain int64 value. +func Int64Attr(k string, v int64) *Attr { + return &Attr{ + K: k, + V: cty.NumberVal(new(big.Float).SetInt64(v).SetPrec(512)), + } +} + +// BoolAttr is a helper method for constructing *schemahcl.Attr instances that contain a boolean value. +func BoolAttr(k string, v bool) *Attr { + return &Attr{ + K: k, + V: cty.BoolVal(v), + } +} + +// RefAttr is a helper method for constructing *schemahcl.Attr instances that contain a Ref value. +func RefAttr(k string, v *Ref) *Attr { + return &Attr{ + K: k, + V: cty.CapsuleVal(ctyRefType, v), + } +} + +// StringsAttr is a helper method for constructing *schemahcl.Attr instances that contain list strings. +func StringsAttr(k string, vs ...string) *Attr { + vv := make([]cty.Value, len(vs)) + for i, v := range vs { + vv[i] = cty.StringVal(v) + } + return &Attr{ + K: k, + V: cty.ListVal(vv), + } +} + +// RefsAttr is a helper method for constructing *schemahcl.Attr instances that contain list references. +func RefsAttr(k string, refs ...*Ref) *Attr { + vv := make([]cty.Value, len(refs)) + for i, v := range refs { + vv[i] = cty.CapsuleVal(ctyRefType, v) + } + return &Attr{ + K: k, + V: cty.ListVal(vv), + } +} + +// RawAttr is a helper method for constructing *schemahcl.Attr instances that contain RawExpr value. +func RawAttr(k string, x string) *Attr { + return &Attr{ + K: k, + V: RawExprValue(&RawExpr{X: x}), + } +} + +// RawExprValue is a helper method for constructing a cty.Value that capsules a raw expression. +func RawExprValue(x *RawExpr) cty.Value { + return cty.CapsuleVal(ctyRawExpr, x) +} diff --git a/vendor/ariga.io/atlas/schemahcl/stdlib.go b/vendor/ariga.io/atlas/schemahcl/stdlib.go new file mode 100644 index 00000000..a3234c3f --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/stdlib.go @@ -0,0 +1,308 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. 
+ +package schemahcl + +import ( + "net/url" + "strconv" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/tryfunc" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +func stdTypes(ctx *hcl.EvalContext) *hcl.EvalContext { + ctx = ctx.NewChild() + ctx.Variables = map[string]cty.Value{ + "string": cty.CapsuleVal(ctyNilType, &cty.String), + "bool": cty.CapsuleVal(ctyNilType, &cty.Bool), + "number": cty.CapsuleVal(ctyNilType, &cty.Number), + // Exists for backwards compatibility. + "int": cty.CapsuleVal(ctyNilType, &cty.Number), + } + ctx.Functions = map[string]function.Function{ + "list": function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "elem_type", Type: ctyNilType}, + }, + Type: function.StaticReturnType(ctyNilType), + Impl: func(args []cty.Value, _ cty.Type) (cty.Value, error) { + argT := args[0].EncapsulatedValue().(*cty.Type) + listT := cty.List(*argT) + return cty.CapsuleVal(ctyNilType, &listT), nil + }, + }), + "set": function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "elem_type", Type: ctyNilType}, + }, + Type: function.StaticReturnType(ctyNilType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + argT := args[0].EncapsulatedValue().(*cty.Type) + setT := cty.Set(*argT) + return cty.CapsuleVal(ctyNilType, &setT), nil + }, + }), + "map": function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "elem_type", Type: ctyNilType}, + }, + Type: function.StaticReturnType(ctyNilType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + argT := args[0].EncapsulatedValue().(*cty.Type) + mapT := cty.Map(*argT) + return cty.CapsuleVal(ctyNilType, &mapT), nil + }, + }), + "tuple": function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "elem_type", Type: cty.List(ctyNilType)}, + }, + Type: function.StaticReturnType(ctyNilType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + argV := args[0] + argsT := make([]cty.Type, 0, argV.LengthInt()) + for it := argV.ElementIterator(); it.Next(); { + _, ev := it.Element() + argsT = append(argsT, *ev.EncapsulatedValue().(*cty.Type)) + } + tupleT := cty.Tuple(argsT) + return cty.CapsuleVal(ctyNilType, &tupleT), nil + }, + }), + "object": function.New(&function.Spec{ + Params: []function.Parameter{ + {Name: "attr_type", Type: cty.Map(ctyNilType)}, + }, + Type: function.StaticReturnType(ctyNilType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + argV := args[0] + argsT := make(map[string]cty.Type) + for it := argV.ElementIterator(); it.Next(); { + nameV, typeV := it.Element() + name := nameV.AsString() + argsT[name] = *typeV.EncapsulatedValue().(*cty.Type) + } + objT := cty.Object(argsT) + return cty.CapsuleVal(ctyNilType, &objT), nil + }, + }), + } + return ctx +} + +// standard functions exist in schemahcl language. 
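Editor's note (not part of the vendored patch): the table below simply re-exports go-cty's stdlib under schemahcl names, so each entry is an ordinary cty function.Function. A minimal sketch exercising two of them directly against go-cty:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// stdlib.UpperFunc backs the "upper" entry below.
	v, err := stdlib.UpperFunc.Call([]cty.Value{cty.StringVal("users")})
	fmt.Println(v.AsString(), err) // USERS <nil>

	// stdlib.JoinFunc backs "join": join(",", ["a", "b"]).
	v, err = stdlib.JoinFunc.Call([]cty.Value{
		cty.StringVal(","),
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})
	fmt.Println(v.AsString(), err) // a,b <nil>
}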
+func stdFuncs() map[string]function.Function { + return map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "ceil": stdlib.CeilFunc, + "chomp": stdlib.ChompFunc, + "chunklist": stdlib.ChunklistFunc, + "coalescelist": stdlib.CoalesceListFunc, + "compact": stdlib.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": stdlib.ContainsFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "distinct": stdlib.DistinctFunc, + "element": stdlib.ElementFunc, + "flatten": stdlib.FlattenFunc, + "floor": stdlib.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": stdlib.IndentFunc, + "index": stdlib.IndexFunc, + "join": stdlib.JoinFunc, + "jsondecode": stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": stdlib.KeysFunc, + "log": stdlib.LogFunc, + "lower": stdlib.LowerFunc, + "max": stdlib.MaxFunc, + "merge": stdlib.MergeFunc, + "min": stdlib.MinFunc, + "parseint": stdlib.ParseIntFunc, + "pow": stdlib.PowFunc, + "range": stdlib.RangeFunc, + "regex": stdlib.RegexFunc, + "regexall": stdlib.RegexAllFunc, + "regexreplace": stdlib.RegexReplaceFunc, + "reverse": stdlib.ReverseListFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": stdlib.SetProductFunc, + "setsubtract": stdlib.SetSubtractFunc, + "setunion": stdlib.SetUnionFunc, + "signum": stdlib.SignumFunc, + "slice": stdlib.SliceFunc, + "sort": stdlib.SortFunc, + "split": stdlib.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "timeadd": stdlib.TimeAddFunc, + "title": stdlib.TitleFunc, + "tobool": makeToFunc(cty.Bool), + "tolist": makeToFunc(cty.List(cty.DynamicPseudoType)), + "tonumber": makeToFunc(cty.Number), + "toset": makeToFunc(cty.Set(cty.DynamicPseudoType)), + "tostring": makeToFunc(cty.String), + "trim": stdlib.TrimFunc, + "trimprefix": stdlib.TrimPrefixFunc, + "trimspace": stdlib.TrimSpaceFunc, + "trimsuffix": stdlib.TrimSuffixFunc, + "try": tryfunc.TryFunc, + "upper": stdlib.UpperFunc, + "urlescape": urlEscape, + "urlqueryset": urlQuerySetFunc, + "urlsetpath": urlSetPathFunc, + "values": stdlib.ValuesFunc, + "zipmap": stdlib.ZipmapFunc, + // A patch from the past. Should be moved + // to specific scopes in the future. + "sql": rawExprImpl(), + } +} + +// makeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. Code was copied from: +// github.com/hashicorp/terraform/blob/master/internal/lang/funcs/conversion.go +func makeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. 
+ switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. + return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + val, _ := args[0].UnmarkDeep() + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := val.Type() + switch { + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} + +var urlQuerySetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "url", + Type: cty.String, + }, + { + Name: "key", + Type: cty.String, + }, + { + Name: "value", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + u, err := url.Parse(args[0].AsString()) + if err != nil { + return cty.NilVal, err + } + q := u.Query() + q.Set(args[1].AsString(), args[2].AsString()) + u.RawQuery = q.Encode() + return cty.StringVal(u.String()), nil + }, +}) + +var urlSetPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "url", + Type: cty.String, + }, + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + u, err := url.Parse(args[0].AsString()) + if err != nil { + return cty.NilVal, err + } + u.Path = args[1].AsString() + return cty.StringVal(u.String()), nil + }, +}) + +var urlEscape = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "string", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + u := url.QueryEscape(args[0].AsString()) + return cty.StringVal(u), nil + }, +}) diff --git a/vendor/ariga.io/atlas/schemahcl/types.go b/vendor/ariga.io/atlas/schemahcl/types.go new file mode 100644 index 00000000..c4c5272e --- /dev/null +++ b/vendor/ariga.io/atlas/schemahcl/types.go @@ -0,0 +1,385 
@@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schemahcl + +import ( + "errors" + "fmt" + "log" + "reflect" + "strings" + + "ariga.io/atlas/sql/schema" + + "github.com/go-openapi/inflect" +) + +// PrintType returns the string representation of a column type which can be parsed +// by the driver into a schema.Type. +func (r *TypeRegistry) PrintType(typ *Type) (string, error) { + spec, ok := r.findT(typ.T) + if !ok { + return "", fmt.Errorf("specutil: type %q not found in registry", typ.T) + } + if len(spec.Attributes) == 0 { + return typ.T, nil + } + var ( + args []string + mid, suffix string + ) + for _, arg := range typ.Attrs { + // TODO(rotemtam): make this part of the TypeSpec + if arg.K == "unsigned" { + b, err := arg.Bool() + if err != nil { + return "", err + } + if b { + suffix += " unsigned" + } + continue + } + attr, ok := spec.Attr(arg.K) + if !ok { + return "", fmt.Errorf("specutil: attribute %q not found in typespec %q", arg.K, typ.T) + } + args = append(args, valueArgs(attr, arg.V)...) + } + if len(args) > 0 { + mid = "(" + strings.Join(args, ",") + ")" + } + return typ.T + mid + suffix, nil +} + +// TypeRegistry is a collection of *schemahcl.TypeSpec. +type TypeRegistry struct { + r []*TypeSpec + spec func(schema.Type) (*Type, error) + parser func(string) (schema.Type, error) +} + +// WithFormatter configures the registry to use a formatting function for printing +// schema.Type as string. +func WithFormatter(f func(schema.Type) (string, error)) TypeRegistryOption { + return func(registry *TypeRegistry) error { + registry.spec = func(t schema.Type) (*Type, error) { + s, err := f(t) + if err != nil { + return nil, fmt.Errorf("specutil: cannot format type %T: %w", t, err) + } + return &Type{T: s}, nil + } + return nil + } +} + +// WithSpecFunc configures the registry to use the given function for converting +// a schema.Type to schemahcl.Type +func WithSpecFunc(spec func(schema.Type) (*Type, error)) TypeRegistryOption { + return func(registry *TypeRegistry) error { + registry.spec = spec + return nil + } +} + +// WithParser configures the registry to use a parsing function for converting +// a string to a schema.Type. +func WithParser(parser func(string) (schema.Type, error)) TypeRegistryOption { + return func(registry *TypeRegistry) error { + registry.parser = parser + return nil + } +} + +// Register adds one or more TypeSpec to the registry. +func (r *TypeRegistry) Register(specs ...*TypeSpec) error { + for _, s := range specs { + if err := validSpec(s); err != nil { + return fmt.Errorf("specutil: invalid typespec %q: %w", s.Name, err) + } + if _, exists := r.findT(s.T); exists { + return fmt.Errorf("specutil: type with T of %q already registered", s.T) + } + if _, exists := r.findName(s.Name); exists { + return fmt.Errorf("specutil: type with name of %q already registered", s.T) + } + r.r = append(r.r, s) + } + return nil +} + +func validSpec(typeSpec *TypeSpec) error { + var seenOptional bool + for i, attr := range typeSpec.Attributes { + if attr.Kind == reflect.Slice && i < len(typeSpec.Attributes)-1 { + return fmt.Errorf("attr %q is of kind slice but not last", attr.Name) + } + if seenOptional && attr.Required { + return fmt.Errorf("attr %q required after optional attr", attr.Name) + } + seenOptional = !attr.Required + } + return nil +} + +// TypeRegistryOption configures a TypeRegistry. 
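Editor's note (not part of the vendored patch): a minimal sketch of the Register/validSpec rules above, using only the exported TypeSpec/TypeAttr fields visible in this file. A required attribute may not follow an optional one, and a duplicate T is rejected.

package main

import (
	"fmt"
	"reflect"

	"ariga.io/atlas/schemahcl"
)

func main() {
	r := &schemahcl.TypeRegistry{}

	// Rejected by validSpec: "charset" is required but follows the
	// optional "size" attribute.
	err := r.Register(&schemahcl.TypeSpec{
		Name: "varchar",
		T:    "varchar",
		Attributes: []*schemahcl.TypeAttr{
			{Name: "size", Kind: reflect.Int},
			{Name: "charset", Kind: reflect.String, Required: true},
		},
	})
	fmt.Println(err != nil) // true

	// A valid spec registers once; the duplicate T is then rejected.
	ts := &schemahcl.TypeSpec{Name: "boolean", T: "boolean"}
	fmt.Println(r.Register(ts))        // <nil>
	fmt.Println(r.Register(ts) != nil) // true
}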
+type TypeRegistryOption func(*TypeRegistry) error + +// WithSpecs configures the registry to register the given list of type specs. +func WithSpecs(specs ...*TypeSpec) TypeRegistryOption { + return func(registry *TypeRegistry) error { + if err := registry.Register(specs...); err != nil { + return fmt.Errorf("failed registering types: %s", err) + } + return nil + } +} + +// NewRegistry creates a new *TypeRegistry, registers the provided types and panics +// if an error occurs. +func NewRegistry(opts ...TypeRegistryOption) *TypeRegistry { + r := &TypeRegistry{} + for _, opt := range opts { + if err := opt(r); err != nil { + log.Fatalf("failed configuring registry: %s", err) + } + } + return r +} + +// findName searches the registry for types that have the provided name. +func (r *TypeRegistry) findName(name string) (*TypeSpec, bool) { + for _, current := range r.r { + if current.Name == name { + return current, true + } + } + return nil, false +} + +// findT searches the registry for types that have the provided T. +func (r *TypeRegistry) findT(t string) (*TypeSpec, bool) { + for _, current := range r.r { + if current.T == t { + return current, true + } + } + return nil, false +} + +// Convert converts the schema.Type to a *schemahcl.Type. +func (r *TypeRegistry) Convert(typ schema.Type) (*Type, error) { + if ut, ok := typ.(*schema.UnsupportedType); ok { + return &Type{ + T: ut.T, + }, nil + } + rv := reflect.ValueOf(typ) + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + if !rv.IsValid() { + return nil, errors.New("specutil: invalid schema.Type on Convert") + } + typeSpec, ok := r.findType(rv) + if !ok { + return r.spec(typ) + } + if typeSpec.ToSpec != nil { + return typeSpec.ToSpec(typ) + } + s := &Type{T: typeSpec.T} + // Iterate the attributes in reverse order, so we can skip zero value and optional attrs. + for i := len(typeSpec.Attributes) - 1; i >= 0; i-- { + attr := typeSpec.Attributes[i] + n := inflect.Camelize(attr.Name) + field := rv.FieldByName(n) + // If TypeSpec has an attribute that isn't mapped to a field on the schema.Type skip it. + if !field.IsValid() || field.Kind() == reflect.Ptr && field.IsNil() { + continue + } + if field = reflect.Indirect(field); field.Kind() != attr.Kind { + return nil, errors.New("incompatible kinds on typespec attr and typefield") + } + switch attr.Kind { + case reflect.Int, reflect.Int64: + v := int(field.Int()) + if v == 0 && len(s.Attrs) == 0 { + break + } + s.Attrs = append([]*Attr{IntAttr(attr.Name, v)}, s.Attrs...) + case reflect.Bool: + v := field.Bool() + if !v && len(s.Attrs) == 0 { + break + } + s.Attrs = append([]*Attr{BoolAttr(attr.Name, v)}, s.Attrs...) + case reflect.String: + v := field.String() + if v == "" && len(s.Attrs) == 0 { + break + } + s.Attrs = append([]*Attr{StringAttr(attr.Name, v)}, s.Attrs...) + case reflect.Slice: + vs, ok := field.Interface().([]string) + if !ok { + return nil, fmt.Errorf("specutil: unsupported slice type %T", field.Interface()) + } + s.Attrs = append([]*Attr{StringsAttr(attr.Name, vs...)}, s.Attrs...) 
+ default: + return nil, fmt.Errorf("specutil: unsupported attr kind %s for attribute %q of %q", attr.Kind, attr.Name, typeSpec.Name) + } + } + return s, nil +} + +func (r *TypeRegistry) findType(rv reflect.Value) (*TypeSpec, bool) { + tf := rv.FieldByName("T") + if tf.IsValid() && tf.Kind() == reflect.String { + name := tf.String() + if typeSpec, ok := r.findT(name); ok { + return typeSpec, true + } + } + if typeSpec, ok := r.findRType(rv.Type()); ok { + return typeSpec, true + } + return nil, false +} + +func (r *TypeRegistry) findRType(rt reflect.Type) (*TypeSpec, bool) { + for _, ts := range r.Specs() { + if ts.RType != nil && ts.RType == rt { + return ts, true + } + } + return nil, false +} + +// Specs returns the TypeSpecs in the registry. +func (r *TypeRegistry) Specs() []*TypeSpec { + return r.r +} + +// Type converts a *schemahcl.Type into a schema.Type. +func (r *TypeRegistry) Type(typ *Type, extra []*Attr) (schema.Type, error) { + typeSpec, ok := r.findT(typ.T) + if !ok { + return r.parser(typ.T) + } + nfa := typeNonFuncArgs(typeSpec) + picked := pickTypeAttrs(extra, nfa) + cp := &Type{ + T: typ.T, + } + cp.Attrs = appendIfNotExist(typ.Attrs, picked) + if typeSpec.FromSpec != nil { + return typeSpec.FromSpec(cp) + } + printType, err := r.PrintType(cp) + if err != nil { + return nil, err + } + return r.parser(printType) +} + +// TypeSpecOption configures a schemahcl.TypeSpec. +type TypeSpecOption func(*TypeSpec) + +// WithAttributes returns an attributes TypeSpecOption. +func WithAttributes(attrs ...*TypeAttr) TypeSpecOption { + return func(spec *TypeSpec) { + spec.Attributes = attrs + } +} + +// WithTypeFormatter allows overriding the Format function for the Type. +func WithTypeFormatter(f func(*Type) (string, error)) TypeSpecOption { + return func(spec *TypeSpec) { + spec.Format = f + } +} + +// WithFromSpec allows configuring the FromSpec convert function using functional options. +func WithFromSpec(f func(*Type) (schema.Type, error)) TypeSpecOption { + return func(spec *TypeSpec) { + spec.FromSpec = f + } +} + +// WithToSpec allows configuring the ToSpec convert function using functional options. +func WithToSpec(f func(schema.Type) (*Type, error)) TypeSpecOption { + return func(spec *TypeSpec) { + spec.ToSpec = f + } +} + +// NewTypeSpec returns a TypeSpec with the provided name. +func NewTypeSpec(name string, opts ...TypeSpecOption) *TypeSpec { + return AliasTypeSpec(name, name, opts...) +} + +// AliasTypeSpec returns a TypeSpec with the provided name. +func AliasTypeSpec(name, dbType string, opts ...TypeSpecOption) *TypeSpec { + ts := &TypeSpec{ + Name: name, + T: dbType, + } + for _, opt := range opts { + opt(ts) + } + return ts +} + +// SizeTypeAttr returns a TypeAttr for a size attribute. +func SizeTypeAttr(required bool) *TypeAttr { + return &TypeAttr{ + Name: "size", + Kind: reflect.Int, + Required: required, + } +} + +// typeNonFuncArgs returns the type attributes that are NOT configured via arguments to the +// type definition, `int unsigned`. +func typeNonFuncArgs(spec *TypeSpec) []*TypeAttr { + var args []*TypeAttr + for _, attr := range spec.Attributes { + // TODO(rotemtam): this should be defined on the TypeSpec. + if attr.Name == "unsigned" { + args = append(args, attr) + } + } + return args +} + +// pickTypeAttrs returns the relevant Attrs matching the wanted TypeAttrs. 
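Editor's note (not part of the vendored patch): a sketch tying the helpers above together: NewTypeSpec/WithAttributes/SizeTypeAttr build a spec, WithSpecs registers it, and PrintType renders a concrete type. The exact rendering goes through the unexported valueArgs helper, so the printed form is an assumption.

package main

import (
	"fmt"

	"ariga.io/atlas/schemahcl"
)

func main() {
	r := schemahcl.NewRegistry(schemahcl.WithSpecs(
		schemahcl.NewTypeSpec("varchar",
			schemahcl.WithAttributes(schemahcl.SizeTypeAttr(true)),
		),
	))
	s, err := r.PrintType(&schemahcl.Type{
		T:     "varchar",
		Attrs: []*schemahcl.Attr{schemahcl.IntAttr("size", 255)},
	})
	// Assuming integer attributes render as decimal arguments:
	fmt.Println(s, err) // varchar(255) <nil>
}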
+func pickTypeAttrs(src []*Attr, wanted []*TypeAttr) []*Attr { + keys := make(map[string]struct{}) + for _, w := range wanted { + keys[w.Name] = struct{}{} + } + var picked []*Attr + for _, attr := range src { + if _, ok := keys[attr.K]; ok { + picked = append(picked, attr) + } + } + return picked +} + +func appendIfNotExist(base []*Attr, additional []*Attr) []*Attr { + exists := make(map[string]struct{}) + for _, attr := range base { + exists[attr.K] = struct{}{} + } + for _, attr := range additional { + if _, ok := exists[attr.K]; !ok { + base = append(base, attr) + } + } + return base +} diff --git a/vendor/ariga.io/atlas/sql/internal/specutil/BUILD b/vendor/ariga.io/atlas/sql/internal/specutil/BUILD new file mode 100644 index 00000000..6d4646c2 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/specutil/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "specutil", + srcs = [ + "convert.go", + "spec.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/internal/specutil", + importpath = "ariga.io/atlas/sql/internal/specutil", + visibility = [ + "//third_party:__subpackages__", + "//vendor/ariga.io/atlas/sql:__subpackages__", + ], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/ariga.io/atlas/sql/internal/sqlx", + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/ariga.io/atlas/sql/sqlspec", + "//vendor/github.com/hashicorp/hcl/v2/hclparse", + "//vendor/github.com/zclconf/go-cty/cty", + ], +) diff --git a/vendor/ariga.io/atlas/sql/internal/specutil/convert.go b/vendor/ariga.io/atlas/sql/internal/specutil/convert.go new file mode 100644 index 00000000..4d2d1db8 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/specutil/convert.go @@ -0,0 +1,733 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package specutil + +import ( + "fmt" + "strconv" + "strings" + + "ariga.io/atlas/schemahcl" + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlspec" + + "github.com/zclconf/go-cty/cty" +) + +// List of convert function types. +type ( + ConvertTableFunc func(*sqlspec.Table, *schema.Schema) (*schema.Table, error) + ConvertColumnFunc func(*sqlspec.Column, *schema.Table) (*schema.Column, error) + ConvertTypeFunc func(*sqlspec.Column) (schema.Type, error) + ConvertPrimaryKeyFunc func(*sqlspec.PrimaryKey, *schema.Table) (*schema.Index, error) + ConvertIndexFunc func(*sqlspec.Index, *schema.Table) (*schema.Index, error) + ConvertCheckFunc func(*sqlspec.Check) (*schema.Check, error) + ColumnSpecFunc func(*schema.Column, *schema.Table) (*sqlspec.Column, error) + ColumnTypeSpecFunc func(schema.Type) (*sqlspec.Column, error) + TableSpecFunc func(*schema.Table) (*sqlspec.Table, error) + PrimaryKeySpecFunc func(*schema.Index) (*sqlspec.PrimaryKey, error) + IndexSpecFunc func(*schema.Index) (*sqlspec.Index, error) + ForeignKeySpecFunc func(*schema.ForeignKey) (*sqlspec.ForeignKey, error) + CheckSpecFunc func(*schema.Check) *sqlspec.Check +) + +// Scan populates the Realm from the schemas and table specs. 
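Editor's note (not part of the vendored patch): specutil is internal to the module, so any usage sketch has to live inside the package. A hypothetical in-package snippet for the Scan entrypoint below; the ExampleScan name is illustrative. With no table specs, the convert callback is never invoked.

package specutil

import (
	"fmt"

	"ariga.io/atlas/sql/schema"
	"ariga.io/atlas/sql/sqlspec"
)

func ExampleScan() {
	r := &schema.Realm{}
	err := Scan(r, []*sqlspec.Schema{{Name: "public"}}, nil, nil)
	fmt.Println(len(r.Schemas), err) // 1 <nil>
}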
+func Scan(r *schema.Realm, schemas []*sqlspec.Schema, tables []*sqlspec.Table, convertTable ConvertTableFunc) error { + byName := make(map[string]*schema.Schema) + for _, spec := range schemas { + s := &schema.Schema{Name: spec.Name, Realm: r} + r.AddSchemas(s) + byName[spec.Name] = s + } + for _, spec := range tables { + name, err := SchemaName(spec.Schema) + if err != nil { + return fmt.Errorf("specutil: cannot extract schema name for table %q: %w", spec.Name, err) + } + s, ok := byName[name] + if !ok { + return fmt.Errorf("specutil: schema %q not found for table %q", name, spec.Name) + } + t, err := convertTable(spec, s) + if err != nil { + return fmt.Errorf("specutil: cannot convert table %q: %w", spec.Name, err) + } + s.AddTables(t) + } + // Link the foreign keys. + for _, s := range r.Schemas { + for _, t := range s.Tables { + spec, err := findTableSpec(tables, s.Name, t.Name) + if err != nil { + return err + } + if err := linkForeignKeys(t, s, spec); err != nil { + return err + } + } + } + return nil +} + +// findTableSpec searches tableSpecs for a spec of a table named tableName in a schema named schemaName. +func findTableSpec(tableSpecs []*sqlspec.Table, schemaName, tableName string) (*sqlspec.Table, error) { + for _, tbl := range tableSpecs { + n, err := SchemaName(tbl.Schema) + if err != nil { + return nil, err + } + if n == schemaName && tbl.Name == tableName { + return tbl, nil + } + } + return nil, fmt.Errorf("table %s.%s not found", schemaName, tableName) +} + +// Table converts a sqlspec.Table to a schema.Table. Table conversion is done without converting +// ForeignKeySpecs into ForeignKeys, as the target tables do not necessarily exist in the schema +// at this point. Instead, the linking is done by the Schema function. +func Table(spec *sqlspec.Table, parent *schema.Schema, convertColumn ConvertColumnFunc, + convertPK ConvertPrimaryKeyFunc, convertIndex ConvertIndexFunc, convertCheck ConvertCheckFunc) (*schema.Table, error) { + tbl := &schema.Table{ + Name: spec.Name, + Schema: parent, + } + for _, csp := range spec.Columns { + col, err := convertColumn(csp, tbl) + if err != nil { + return nil, err + } + tbl.Columns = append(tbl.Columns, col) + } + if spec.PrimaryKey != nil { + pk, err := convertPK(spec.PrimaryKey, tbl) + if err != nil { + return nil, err + } + tbl.PrimaryKey = pk + } + for _, idx := range spec.Indexes { + i, err := convertIndex(idx, tbl) + if err != nil { + return nil, err + } + tbl.Indexes = append(tbl.Indexes, i) + } + for _, c := range spec.Checks { + c, err := convertCheck(c) + if err != nil { + return nil, err + } + tbl.AddChecks(c) + } + if err := convertCommentFromSpec(spec, &tbl.Attrs); err != nil { + return nil, err + } + return tbl, nil +} + +// Column converts a sqlspec.Column into a schema.Column. 
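Editor's note (not part of the vendored patch): a hypothetical in-package sketch of the Column conversion below. The stand-in type converter and the schema.BoolType literal are assumptions for illustration; real drivers supply their own ConvertTypeFunc.

package specutil

import (
	"fmt"

	"ariga.io/atlas/sql/schema"
	"ariga.io/atlas/sql/sqlspec"

	"github.com/zclconf/go-cty/cty"
)

func ExampleColumn() {
	spec := &sqlspec.Column{Name: "active", Default: cty.BoolVal(true)}
	col, err := Column(spec, func(*sqlspec.Column) (schema.Type, error) {
		return &schema.BoolType{T: "boolean"}, nil // stand-in converter
	})
	if err != nil {
		panic(err)
	}
	// The cty bool default is lowered to a schema.Literal.
	fmt.Println(col.Name, col.Default.(*schema.Literal).V) // active true
}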
+func Column(spec *sqlspec.Column, conv ConvertTypeFunc) (*schema.Column, error) { + out := &schema.Column{ + Name: spec.Name, + Type: &schema.ColumnType{ + Null: spec.Null, + }, + } + if d := spec.Default; !d.IsNull() { + switch { + case d.Type() == cty.String: + out.Default = &schema.Literal{V: d.AsString()} + case d.Type() == cty.Number: + out.Default = &schema.Literal{V: d.AsBigFloat().String()} + case d.Type() == cty.Bool: + out.Default = &schema.Literal{V: strconv.FormatBool(d.True())} + case d.Type().IsCapsuleType(): + x, ok := d.EncapsulatedValue().(*schemahcl.RawExpr) + if !ok { + return nil, fmt.Errorf("invalid default value %q", d.Type().FriendlyName()) + } + out.Default = &schema.RawExpr{X: x.X} + default: + return nil, fmt.Errorf("unsupported value type for default: %T", d) + } + } + ct, err := conv(spec) + if err != nil { + return nil, err + } + out.Type.Type = ct + if err := convertCommentFromSpec(spec, &out.Attrs); err != nil { + return nil, err + } + return out, err +} + +// Index converts a sqlspec.Index to a schema.Index. The optional arguments allow +// passing functions for mutating the created index-part (e.g. add attributes). +func Index(spec *sqlspec.Index, parent *schema.Table, partFns ...func(*sqlspec.IndexPart, *schema.IndexPart) error) (*schema.Index, error) { + parts := make([]*schema.IndexPart, 0, len(spec.Columns)+len(spec.Parts)) + switch n, m := len(spec.Columns), len(spec.Parts); { + case n == 0 && m == 0: + return nil, fmt.Errorf("missing definition for index %q", spec.Name) + case n > 0 && m > 0: + return nil, fmt.Errorf(`multiple definitions for index %q, use "columns" or "on"`, spec.Name) + case n > 0: + for i, c := range spec.Columns { + c, err := ColumnByRef(parent, c) + if err != nil { + return nil, err + } + parts = append(parts, &schema.IndexPart{ + SeqNo: i, + C: c, + }) + } + case m > 0: + for i, p := range spec.Parts { + part := &schema.IndexPart{SeqNo: i, Desc: p.Desc} + switch { + case p.Column == nil && p.Expr == "": + return nil, fmt.Errorf(`"column" or "expr" are required for index %q at position %d`, spec.Name, i) + case p.Column != nil && p.Expr != "": + return nil, fmt.Errorf(`cannot use both "column" and "expr" in index %q at position %d`, spec.Name, i) + case p.Expr != "": + part.X = &schema.RawExpr{X: p.Expr} + case p.Column != nil: + c, err := ColumnByRef(parent, p.Column) + if err != nil { + return nil, err + } + part.C = c + } + for _, f := range partFns { + if err := f(p, part); err != nil { + return nil, err + } + } + parts = append(parts, part) + } + } + i := &schema.Index{ + Name: spec.Name, + Unique: spec.Unique, + Table: parent, + Parts: parts, + } + if err := convertCommentFromSpec(spec, &i.Attrs); err != nil { + return nil, err + } + return i, nil +} + +// Check converts a sqlspec.Check to a schema.Check. +func Check(spec *sqlspec.Check) (*schema.Check, error) { + return &schema.Check{ + Name: spec.Name, + Expr: spec.Expr, + }, nil +} + +// PrimaryKey converts a sqlspec.PrimaryKey to a schema.Index. 
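Editor's note (not part of the vendored patch): before PrimaryKey below, a hypothetical in-package sketch for the Index conversion above. It assumes ColumnRef (defined later in this file) and Ref.ByType round-trip, which is exactly what ColumnByRef relies on.

package specutil

import (
	"fmt"

	"ariga.io/atlas/schemahcl"
	"ariga.io/atlas/sql/schema"
	"ariga.io/atlas/sql/sqlspec"
)

func ExampleIndex() {
	parent := &schema.Table{
		Name:    "users",
		Columns: []*schema.Column{{Name: "id"}},
	}
	idx, err := Index(&sqlspec.Index{
		Name:    "users_id",
		Unique:  true,
		Columns: []*schemahcl.Ref{ColumnRef("id")},
	}, parent)
	if err != nil {
		panic(err)
	}
	fmt.Println(idx.Name, idx.Unique, len(idx.Parts)) // users_id true 1
}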
+func PrimaryKey(spec *sqlspec.PrimaryKey, parent *schema.Table) (*schema.Index, error) { + parts := make([]*schema.IndexPart, 0, len(spec.Columns)) + for seqno, c := range spec.Columns { + c, err := ColumnByRef(parent, c) + if err != nil { + return nil, nil + } + parts = append(parts, &schema.IndexPart{ + SeqNo: seqno, + C: c, + }) + } + return &schema.Index{ + Table: parent, + Parts: parts, + }, nil +} + +// linkForeignKeys creates the foreign keys defined in the Table's spec by creating references +// to column in the provided Schema. It is assumed that all tables referenced FK definitions in the spec +// are reachable from the provided schema or its connected realm. +func linkForeignKeys(tbl *schema.Table, sch *schema.Schema, table *sqlspec.Table) error { + for _, spec := range table.ForeignKeys { + fk := &schema.ForeignKey{Symbol: spec.Symbol, Table: tbl} + if spec.OnUpdate != nil { + fk.OnUpdate = schema.ReferenceOption(FromVar(spec.OnUpdate.V)) + } + if spec.OnDelete != nil { + fk.OnDelete = schema.ReferenceOption(FromVar(spec.OnDelete.V)) + } + if n, m := len(spec.Columns), len(spec.RefColumns); n != m { + return fmt.Errorf("sqlspec: number of referencing and referenced columns do not match for foreign-key %q", fk.Symbol) + } + for _, ref := range spec.Columns { + c, err := ColumnByRef(tbl, ref) + if err != nil { + return err + } + fk.Columns = append(fk.Columns, c) + } + for i, ref := range spec.RefColumns { + t, c, err := externalRef(ref, sch) + if isLocalRef(ref) { + t = fk.Table + c, err = ColumnByRef(fk.Table, ref) + } + if err != nil { + return err + } + if i > 0 && fk.RefTable != t { + return fmt.Errorf("sqlspec: more than 1 table was referenced for foreign-key %q", fk.Symbol) + } + fk.RefTable = t + fk.RefColumns = append(fk.RefColumns, c) + } + tbl.ForeignKeys = append(tbl.ForeignKeys, fk) + } + return nil +} + +// FromSchema converts a schema.Schema into sqlspec.Schema and []sqlspec.Table. +func FromSchema(s *schema.Schema, fn TableSpecFunc) (*sqlspec.Schema, []*sqlspec.Table, error) { + spec := &sqlspec.Schema{ + Name: s.Name, + } + tables := make([]*sqlspec.Table, 0, len(s.Tables)) + for _, t := range s.Tables { + table, err := fn(t) + if err != nil { + return nil, nil, err + } + if s.Name != "" { + table.Schema = SchemaRef(s.Name) + } + tables = append(tables, table) + } + return spec, tables, nil +} + +// FromTable converts a schema.Table to a sqlspec.Table. +func FromTable(t *schema.Table, colFn ColumnSpecFunc, pkFn PrimaryKeySpecFunc, idxFn IndexSpecFunc, + fkFn ForeignKeySpecFunc, ckFn CheckSpecFunc) (*sqlspec.Table, error) { + spec := &sqlspec.Table{ + Name: t.Name, + } + for _, c := range t.Columns { + col, err := colFn(c, t) + if err != nil { + return nil, err + } + spec.Columns = append(spec.Columns, col) + } + if t.PrimaryKey != nil { + pk, err := pkFn(t.PrimaryKey) + if err != nil { + return nil, err + } + spec.PrimaryKey = pk + } + for _, idx := range t.Indexes { + i, err := idxFn(idx) + if err != nil { + return nil, err + } + spec.Indexes = append(spec.Indexes, i) + } + for _, fk := range t.ForeignKeys { + f, err := fkFn(fk) + if err != nil { + return nil, err + } + spec.ForeignKeys = append(spec.ForeignKeys, f) + } + for _, attr := range t.Attrs { + if c, ok := attr.(*schema.Check); ok { + spec.Checks = append(spec.Checks, ckFn(c)) + } + } + convertCommentFromSchema(t.Attrs, &spec.Extra.Attrs) + return spec, nil +} + +// FromPrimaryKey converts schema.Index to a sqlspec.PrimaryKey. 
+func FromPrimaryKey(s *schema.Index) (*sqlspec.PrimaryKey, error) { + c := make([]*schemahcl.Ref, 0, len(s.Parts)) + for _, v := range s.Parts { + c = append(c, ColumnRef(v.C.Name)) + } + return &sqlspec.PrimaryKey{ + Columns: c, + }, nil +} + +// FromColumn converts a *schema.Column into a *sqlspec.Column using the ColumnTypeSpecFunc. +func FromColumn(col *schema.Column, columnTypeSpec ColumnTypeSpecFunc) (*sqlspec.Column, error) { + ct, err := columnTypeSpec(col.Type.Type) + if err != nil { + return nil, err + } + spec := &sqlspec.Column{ + Name: col.Name, + Type: ct.Type, + Null: col.Type.Null, + DefaultExtension: schemahcl.DefaultExtension{ + Extra: schemahcl.Resource{Attrs: ct.DefaultExtension.Extra.Attrs}, + }, + } + if col.Default != nil { + lv, err := ExprValue(col.Default) + if err != nil { + return nil, err + } + spec.Default = lv + } + convertCommentFromSchema(col.Attrs, &spec.Extra.Attrs) + return spec, nil +} + +// FromGenExpr returns the spec for a generated expression. +func FromGenExpr(x schema.GeneratedExpr, t func(string) string) *schemahcl.Resource { + return &schemahcl.Resource{ + Type: "as", + Attrs: []*schemahcl.Attr{ + schemahcl.StringAttr("expr", x.Expr), + VarAttr("type", t(x.Type)), + }, + } +} + +// ConvertGenExpr converts the "as" attribute or the block under the given resource. +func ConvertGenExpr(r *schemahcl.Resource, c *schema.Column, t func(string) string) error { + asA, okA := r.Attr("as") + asR, okR := r.Resource("as") + switch { + case okA && okR: + return fmt.Errorf("multiple as definitions for column %q", c.Name) + case okA: + expr, err := asA.String() + if err != nil { + return err + } + c.Attrs = append(c.Attrs, &schema.GeneratedExpr{ + Type: t(""), // default type. + Expr: expr, + }) + case okR: + var spec struct { + Expr string `spec:"expr"` + Type string `spec:"type"` + } + if err := asR.As(&spec); err != nil { + return err + } + c.Attrs = append(c.Attrs, &schema.GeneratedExpr{ + Expr: spec.Expr, + Type: t(spec.Type), + }) + } + return nil +} + +// ExprValue converts a schema.Expr to a cty.Value. +func ExprValue(expr schema.Expr) (cty.Value, error) { + switch x := expr.(type) { + case *schema.RawExpr: + return schemahcl.RawExprValue(&schemahcl.RawExpr{X: x.X}), nil + case *schema.Literal: + switch { + case oneOfPrefix(x.V, "0x", "0X", "0b", "0B", "b'", "B'", "x'", "X'"): + return schemahcl.RawExprValue(&schemahcl.RawExpr{X: x.V}), nil + case sqlx.IsQuoted(x.V, '\'', '"'): + // Normalize single quotes to double quotes. + s, err := sqlx.Unquote(x.V) + if err != nil { + return cty.NilVal, err + } + return cty.StringVal(s), nil + case strings.ToLower(x.V) == "true", strings.ToLower(x.V) == "false": + return cty.BoolVal(strings.ToLower(x.V) == "true"), nil + case strings.Contains(x.V, "."): + f, err := strconv.ParseFloat(x.V, 64) + if err != nil { + return cty.NilVal, err + } + return cty.NumberFloatVal(f), nil + case sqlx.IsLiteralNumber(x.V): + i, err := strconv.ParseInt(x.V, 10, 64) + if err != nil { + return cty.NilVal, err + } + return cty.NumberIntVal(i), nil + default: + return cty.NilVal, fmt.Errorf("unsupported literal value %q", x.V) + } + default: + return cty.NilVal, fmt.Errorf("converting expr %T to literal value", expr) + } +} + +// FromIndex converts schema.Index to sqlspec.Index. 
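Editor's note (not part of the vendored patch): a hypothetical in-package sketch of ExprValue's literal handling above: quoted strings are unquoted, bare numbers and booleans get native cty values, and raw expressions stay opaque capsules.

package specutil

import (
	"fmt"

	"ariga.io/atlas/sql/schema"
)

func ExampleExprValue() {
	v, _ := ExprValue(&schema.Literal{V: "'hello'"})
	fmt.Println(v.AsString()) // hello

	n, _ := ExprValue(&schema.Literal{V: "42"})
	b, _ := ExprValue(&schema.Literal{V: "TRUE"})
	fmt.Println(n.AsBigFloat().String(), b.True()) // 42 true

	x, _ := ExprValue(&schema.RawExpr{X: "now()"})
	fmt.Println(x.Type().IsCapsuleType()) // true
}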
+func FromIndex(idx *schema.Index, partFns ...func(*schema.Index, *schema.IndexPart, *sqlspec.IndexPart) error) (*sqlspec.Index, error) { + spec := &sqlspec.Index{Name: idx.Name, Unique: idx.Unique} + convertCommentFromSchema(idx.Attrs, &spec.Extra.Attrs) + spec.Parts = make([]*sqlspec.IndexPart, len(idx.Parts)) + for i, p := range idx.Parts { + part := &sqlspec.IndexPart{Desc: p.Desc} + switch { + case p.C == nil && p.X == nil: + return nil, fmt.Errorf("missing column or expression for key part of index %q", idx.Name) + case p.C != nil && p.X != nil: + return nil, fmt.Errorf("multiple key part definitions for index %q", idx.Name) + case p.C != nil: + part.Column = ColumnRef(p.C.Name) + case p.X != nil: + x, ok := p.X.(*schema.RawExpr) + if !ok { + return nil, fmt.Errorf("unexpected expression %T for index %q", p.X, idx.Name) + } + part.Expr = x.X + } + for _, f := range partFns { + if err := f(idx, p, part); err != nil { + return nil, err + } + } + spec.Parts[i] = part + } + if parts, ok := columnsOnly(spec.Parts); ok { + spec.Parts = nil + spec.Columns = parts + return spec, nil + } + return spec, nil +} + +func columnsOnly(parts []*sqlspec.IndexPart) ([]*schemahcl.Ref, bool) { + columns := make([]*schemahcl.Ref, len(parts)) + for i, p := range parts { + if p.Desc || p.Column == nil || len(p.Extra.Attrs) != 0 { + return nil, false + } + columns[i] = p.Column + } + return columns, true +} + +// FromForeignKey converts schema.ForeignKey to sqlspec.ForeignKey. +func FromForeignKey(s *schema.ForeignKey) (*sqlspec.ForeignKey, error) { + c := make([]*schemahcl.Ref, 0, len(s.Columns)) + for _, v := range s.Columns { + c = append(c, ColumnRef(v.Name)) + } + r := make([]*schemahcl.Ref, 0, len(s.RefColumns)) + for _, v := range s.RefColumns { + ref := ColumnRef(v.Name) + if s.Table != s.RefTable { + ref = externalColRef(v.Name, s.RefTable.Name) + } + r = append(r, ref) + } + fk := &sqlspec.ForeignKey{ + Symbol: s.Symbol, + Columns: c, + RefColumns: r, + } + if s.OnUpdate != "" { + fk.OnUpdate = &schemahcl.Ref{V: Var(string(s.OnUpdate))} + } + if s.OnDelete != "" { + fk.OnDelete = &schemahcl.Ref{V: Var(string(s.OnDelete))} + } + return fk, nil +} + +// FromCheck converts schema.Check to sqlspec.Check. +func FromCheck(s *schema.Check) *sqlspec.Check { + return &sqlspec.Check{ + Name: s.Name, + Expr: s.Expr, + } +} + +// SchemaName returns the name from a ref to a schema. +func SchemaName(ref *schemahcl.Ref) (string, error) { + vs, err := ref.ByType("schema") + if err != nil { + return "", err + } + if len(vs) != 1 { + return "", fmt.Errorf("specutil: expected 1 schema ref, got %d", len(vs)) + } + return vs[0], nil +} + +// ColumnByRef returns a column from the table by its reference. +func ColumnByRef(t *schema.Table, ref *schemahcl.Ref) (*schema.Column, error) { + vs, err := ref.ByType("column") + if err != nil { + return nil, err + } + if len(vs) != 1 { + return nil, fmt.Errorf("specutil: expected 1 column ref, got %d", len(vs)) + } + c, ok := t.Column(vs[0]) + if !ok { + return nil, fmt.Errorf("specutil: unknown column %q in table %q", vs[0], t.Name) + } + return c, nil +} + +func externalRef(ref *schemahcl.Ref, sch *schema.Schema) (*schema.Table, *schema.Column, error) { + tbl, err := findTable(ref, sch) + if err != nil { + return nil, nil, err + } + c, err := ColumnByRef(tbl, ref) + if err != nil { + return nil, nil, err + } + return tbl, c, nil +} + +// findTable finds the table referenced by ref in the provided schema. 
If the table +// is not in the provided schema.Schema other schemas in the connected schema.Realm +// are searched as well. +func findTable(ref *schemahcl.Ref, sch *schema.Schema) (*schema.Table, error) { + qualifier, name, err := tableName(ref) + if err != nil { + return nil, err + } + var ( + matches []*schema.Table // Found references. + schemas []*schema.Schema // Schemas to search. + ) + switch { + case sch.Realm == nil || qualifier == sch.Name: + schemas = []*schema.Schema{sch} + case qualifier == "": + schemas = sch.Realm.Schemas + default: + s, ok := sch.Realm.Schema(qualifier) + if ok { + schemas = []*schema.Schema{s} + } + } + for _, s := range schemas { + t, ok := s.Table(name) + if ok { + matches = append(matches, t) + } + } + switch len(matches) { + case 1: + return matches[0], nil + case 0: + return nil, fmt.Errorf("sqlspec: table %q not found", name) + default: + return nil, fmt.Errorf("specutil: multiple tables found for %q", name) + } +} + +func tableName(ref *schemahcl.Ref) (qualifier, name string, err error) { + vs, err := ref.ByType("table") + if err != nil { + return "", "", err + } + switch len(vs) { + case 1: + name = vs[0] + case 2: + qualifier, name = vs[0], vs[1] + default: + return "", "", fmt.Errorf("sqlspec: unexpected number of references in %q", vs) + } + return +} + +func isLocalRef(r *schemahcl.Ref) bool { + return strings.HasPrefix(r.V, "$column") +} + +// ColumnRef returns the reference of a column by its name. +func ColumnRef(cName string) *schemahcl.Ref { + return schemahcl.BuildRef([]schemahcl.PathIndex{ + {T: "column", V: []string{cName}}, + }) +} + +func externalColRef(cName string, tName string) *schemahcl.Ref { + return schemahcl.BuildRef([]schemahcl.PathIndex{ + {T: "table", V: []string{tName}}, + {T: "column", V: []string{cName}}, + }) +} + +func qualifiedExternalColRef(cName, tName, sName string) *schemahcl.Ref { + return schemahcl.BuildRef([]schemahcl.PathIndex{ + {T: "table", V: []string{sName, tName}}, + {T: "column", V: []string{cName}}, + }) +} + +// SchemaRef returns the schemahcl.Ref to the schema with the given name. +func SchemaRef(name string) *schemahcl.Ref { + return schemahcl.BuildRef([]schemahcl.PathIndex{ + {T: "schema", V: []string{name}}, + }) +} + +// Attrer is the interface that wraps the Attr method. +type Attrer interface { + Attr(string) (*schemahcl.Attr, bool) +} + +// convertCommentFromSpec converts a spec comment attribute to a schema element attribute. +func convertCommentFromSpec(spec Attrer, attrs *[]schema.Attr) error { + if c, ok := spec.Attr("comment"); ok { + s, err := c.String() + if err != nil { + return err + } + *attrs = append(*attrs, &schema.Comment{Text: s}) + } + return nil +} + +// convertCommentFromSchema converts a schema element comment attribute to a spec comment attribute. +func convertCommentFromSchema(src []schema.Attr, trgt *[]*schemahcl.Attr) { + var c schema.Comment + if sqlx.Has(src, &c) { + *trgt = append(*trgt, schemahcl.StringAttr("comment", c.Text)) + } +} + +// ReferenceVars holds the HCL variables +// for foreign keys' referential-actions. +var ReferenceVars = []string{ + Var(string(schema.NoAction)), + Var(string(schema.Restrict)), + Var(string(schema.Cascade)), + Var(string(schema.SetNull)), + Var(string(schema.SetDefault)), +} + +// Var formats a string as variable to make it HCL compatible. +// The result is simple, replace each space with underscore. +func Var(s string) string { return strings.ReplaceAll(s, " ", "_") } + +// FromVar is the inverse function of Var. 
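Editor's note (not part of the vendored patch): a sketch of the reference builders above. It assumes schemahcl.BuildRef and Ref.ByType are inverses, as the Path parser earlier in this patch suggests; the concrete string form of Ref.V comes from an unexported helper and is not asserted here.

package main

import (
	"fmt"

	"ariga.io/atlas/schemahcl"
)

func main() {
	// The shape produced by qualifiedExternalColRef above:
	// a schema-qualified table followed by a column.
	ref := schemahcl.BuildRef([]schemahcl.PathIndex{
		{T: "table", V: []string{"public", "users"}},
		{T: "column", V: []string{"id"}},
	})
	t, _ := ref.ByType("table")
	c, _ := ref.ByType("column")
	fmt.Println(t, c) // [public users] [id]
}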
+func FromVar(s string) string { return strings.ReplaceAll(s, "_", " ") } + +func oneOfPrefix(s string, ps ...string) bool { + for _, p := range ps { + if strings.HasPrefix(s, p) { + return true + } + } + return false +} diff --git a/vendor/ariga.io/atlas/sql/internal/specutil/spec.go b/vendor/ariga.io/atlas/sql/internal/specutil/spec.go new file mode 100644 index 00000000..81034064 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/specutil/spec.go @@ -0,0 +1,136 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package specutil + +import ( + "fmt" + + "ariga.io/atlas/schemahcl" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlspec" + + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/zclconf/go-cty/cty" +) + +// VarAttr is a helper method for constructing *schemahcl.Attr instances that contain a variable reference. +func VarAttr(k, v string) *schemahcl.Attr { + return schemahcl.RefAttr(k, &schemahcl.Ref{V: v}) +} + +type doc struct { + Tables []*sqlspec.Table `spec:"table"` + Schemas []*sqlspec.Schema `spec:"schema"` +} + +// Marshal marshals v into an Atlas DDL document using a schemahcl.Marshaler. Marshal uses the given +// schemaSpec function to convert a *schema.Schema into *sqlspec.Schema and []*sqlspec.Table. +func Marshal(v any, marshaler schemahcl.Marshaler, schemaSpec func(schem *schema.Schema) (*sqlspec.Schema, []*sqlspec.Table, error)) ([]byte, error) { + d := &doc{} + switch s := v.(type) { + case *schema.Schema: + spec, tables, err := schemaSpec(s) + if err != nil { + return nil, fmt.Errorf("specutil: failed converting schema to spec: %w", err) + } + d.Tables = tables + d.Schemas = []*sqlspec.Schema{spec} + case *schema.Realm: + for _, s := range s.Schemas { + spec, tables, err := schemaSpec(s) + if err != nil { + return nil, fmt.Errorf("specutil: failed converting schema to spec: %w", err) + } + d.Tables = append(d.Tables, tables...) + d.Schemas = append(d.Schemas, spec) + } + if err := QualifyDuplicates(d.Tables); err != nil { + return nil, err + } + if err := QualifyReferences(d.Tables, s); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("specutil: failed marshaling spec. %T is not supported", v) + } + return marshaler.MarshalSpec(d) +} + +// QualifyDuplicates sets the Qualified field equal to the schema name in any tables +// with duplicate names in the provided table specs. +func QualifyDuplicates(tableSpecs []*sqlspec.Table) error { + seen := make(map[string]*sqlspec.Table, len(tableSpecs)) + for _, tbl := range tableSpecs { + if s, ok := seen[tbl.Name]; ok { + schemaName, err := SchemaName(s.Schema) + if err != nil { + return err + } + s.Qualifier = schemaName + schemaName, err = SchemaName(tbl.Schema) + if err != nil { + return err + } + tbl.Qualifier = schemaName + } + seen[tbl.Name] = tbl + } + return nil +} + +// QualifyReferences qualifies any reference with qualifier. 
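Editor's note (not part of the vendored patch): a hypothetical in-package sketch of QualifyDuplicates above: two tables that share a name each receive their schema name as qualifier, so the marshaled document stays unambiguous.

package specutil

import (
	"fmt"

	"ariga.io/atlas/sql/sqlspec"
)

func ExampleQualifyDuplicates() {
	t1 := &sqlspec.Table{Name: "users", Schema: SchemaRef("a")}
	t2 := &sqlspec.Table{Name: "users", Schema: SchemaRef("b")}
	if err := QualifyDuplicates([]*sqlspec.Table{t1, t2}); err != nil {
		panic(err)
	}
	fmt.Println(t1.Qualifier, t2.Qualifier) // a b
}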
+func QualifyReferences(tableSpecs []*sqlspec.Table, realm *schema.Realm) error { + type cref struct{ s, t string } + byRef := make(map[cref]*sqlspec.Table) + for _, t := range tableSpecs { + r := cref{s: t.Qualifier, t: t.Name} + if byRef[r] != nil { + return fmt.Errorf("duplicate references were found for: %v", r) + } + byRef[r] = t + } + for _, t := range tableSpecs { + sname, err := SchemaName(t.Schema) + if err != nil { + return err + } + s1, ok := realm.Schema(sname) + if !ok { + return fmt.Errorf("schema %q was not found in realm", sname) + } + t1, ok := s1.Table(t.Name) + if !ok { + return fmt.Errorf("table %q.%q was not found in realm", sname, t.Name) + } + for _, fk := range t.ForeignKeys { + fk1, ok := t1.ForeignKey(fk.Symbol) + if !ok { + return fmt.Errorf("table %q.%q.%q was not found in realm", sname, t.Name, fk.Symbol) + } + for i, c := range fk.RefColumns { + if r, ok := byRef[cref{s: fk1.RefTable.Schema.Name, t: fk1.RefTable.Name}]; ok && r.Qualifier != "" { + fk.RefColumns[i] = qualifiedExternalColRef(fk1.RefColumns[i].Name, r.Name, r.Qualifier) + } else if r, ok := byRef[cref{t: fk1.RefTable.Name}]; ok && r.Qualifier == "" { + fk.RefColumns[i] = externalColRef(fk1.RefColumns[i].Name, r.Name) + } else { + return fmt.Errorf("missing reference for column %q in %q.%q.%q", c.V, sname, t.Name, fk.Symbol) + } + } + } + } + return nil +} + +// HCLBytesFunc returns a helper that evaluates an HCL document from a byte slice instead +// of from an hclparse.Parser instance. +func HCLBytesFunc(ev schemahcl.Evaluator) func(b []byte, v any, inp map[string]cty.Value) error { + return func(b []byte, v any, inp map[string]cty.Value) error { + parser := hclparse.NewParser() + if _, diag := parser.ParseHCL(b, ""); diag.HasErrors() { + return diag + } + return ev.Eval(parser, v, inp) + } +} diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/BUILD b/vendor/ariga.io/atlas/sql/internal/sqlx/BUILD new file mode 100644 index 00000000..5bd095ee --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/sqlx/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlx", + srcs = [ + "dev.go", + "diff.go", + "exclude.go", + "plan.go", + "sqlx.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/internal/sqlx", + importpath = "ariga.io/atlas/sql/internal/sqlx", + visibility = [ + "//third_party:__subpackages__", + "//vendor/ariga.io/atlas/sql:__subpackages__", + ], + deps = [ + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/schema", + ], +) diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/dev.go b/vendor/ariga.io/atlas/sql/internal/sqlx/dev.go new file mode 100644 index 00000000..93736be2 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/sqlx/dev.go @@ -0,0 +1,132 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlx + +import ( + "context" + "fmt" + "hash/fnv" + "time" + + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" +) + +// DevDriver is a driver that provides additional functionality +// to interact with the development database. +type DevDriver struct { + // A Driver connected to the dev database. + migrate.Driver + + // MaxNameLen configures the max length of object names in + // the connected database (e.g. 64 in MySQL). Longer names + // are trimmed and suffixed with their hash. 
+ MaxNameLen int + + // DropClause holds optional clauses that + // can be added to the DropSchema change. + DropClause []schema.Clause + + // PatchColumn allows providing a custom function to patch + // columns that hold a schema reference. + PatchColumn func(*schema.Schema, *schema.Column) +} + +// NormalizeRealm implements the schema.Normalizer interface. +// +// The implementation converts schema objects in "natural form" (e.g. HCL or DSL) +// to their "normal presentation" in the database, by creating them temporarily in +// a "dev database", and then inspects them from there. +func (d *DevDriver) NormalizeRealm(ctx context.Context, r *schema.Realm) (nr *schema.Realm, err error) { + var ( + names = make(map[string]string) + changes = make([]schema.Change, 0, len(r.Schemas)) + reverse = make([]schema.Change, 0, len(r.Schemas)) + opts = &schema.InspectRealmOption{ + Schemas: make([]string, 0, len(r.Schemas)), + } + ) + for _, s := range r.Schemas { + if s.Realm != r { + s.Realm = r + } + dev := d.formatName(s.Name) + names[dev] = s.Name + s.Name = dev + opts.Schemas = append(opts.Schemas, s.Name) + // Skip adding the schema.IfNotExists clause + // to fail if the schema exists. + st := schema.New(dev).AddAttrs(s.Attrs...) + changes = append(changes, &schema.AddSchema{S: st}) + reverse = append(reverse, &schema.DropSchema{S: st, Extra: append(d.DropClause, &schema.IfExists{})}) + for _, t := range s.Tables { + // If objects are not strongly connected. + if t.Schema != s { + t.Schema = s + } + for _, c := range t.Columns { + if e, ok := c.Type.Type.(*schema.EnumType); ok && e.Schema != s { + e.Schema = s + } + if d.PatchColumn != nil { + d.PatchColumn(s, c) + } + } + changes = append(changes, &schema.AddTable{T: t}) + } + } + patch := func(r *schema.Realm) { + for _, s := range r.Schemas { + s.Name = names[s.Name] + } + } + // Delete the dev resources, and return + // the source realm to its initial state. + defer func() { + patch(r) + if rerr := d.ApplyChanges(ctx, reverse); rerr != nil { + if err != nil { + rerr = fmt.Errorf("%w: %v", err, rerr) + } + err = rerr + } + }() + if err := d.ApplyChanges(ctx, changes); err != nil { + return nil, err + } + if nr, err = d.InspectRealm(ctx, opts); err != nil { + return nil, err + } + patch(nr) + return nr, nil +} + +// NormalizeSchema returns the normal representation of the given database. See NormalizeRealm for more info. +func (d *DevDriver) NormalizeSchema(ctx context.Context, s *schema.Schema) (*schema.Schema, error) { + r := &schema.Realm{} + if s.Realm != nil { + r.Attrs = s.Realm.Attrs + } + r.Schemas = append(r.Schemas, s) + nr, err := d.NormalizeRealm(ctx, r) + if err != nil { + return nil, err + } + ns, ok := nr.Schema(s.Name) + if !ok { + return nil, fmt.Errorf("missing normalized schema %q", s.Name) + } + return ns, nil +} + +func (d *DevDriver) formatName(name string) string { + dev := fmt.Sprintf("atlas_dev_%s_%d", name, time.Now().Unix()) + if d.MaxNameLen == 0 || len(dev) <= d.MaxNameLen { + return dev + } + h := fnv.New128() + h.Write([]byte(dev)) + return fmt.Sprintf("%s_%x", dev[:d.MaxNameLen-1-h.Size()*2], h.Sum(nil)) +} diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/diff.go b/vendor/ariga.io/atlas/sql/internal/sqlx/diff.go new file mode 100644 index 00000000..a2ca6227 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/sqlx/diff.go @@ -0,0 +1,577 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlx + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "ariga.io/atlas/sql/schema" +) + +type ( + // A Diff provides a generic schema.Differ for diffing schema elements. + // + // The DiffDriver is required for supporting database/dialect specific + // diff capabilities, like diffing custom types or attributes. + Diff struct { + DiffDriver + } + + // A DiffDriver wraps all required methods for diffing elements that may + // have database-specific diff logic. See sql/schema/mysql/diff.go for an + // implementation example. + DiffDriver interface { + // SchemaAttrDiff returns a changeset for migrating schema attributes + // from one state to the other. For example, changing schema collation. + SchemaAttrDiff(from, to *schema.Schema) []schema.Change + + // TableAttrDiff returns a changeset for migrating table attributes from + // one state to the other. For example, dropping or adding a `CHECK` constraint. + TableAttrDiff(from, to *schema.Table) ([]schema.Change, error) + + // ColumnChange returns the schema changes (if any) for migrating one column to the other. + ColumnChange(fromT *schema.Table, from, to *schema.Column) (schema.ChangeKind, error) + + // IndexAttrChanged reports if the index attributes were changed. + // For example, an index type or predicate (for partial indexes). + IndexAttrChanged(from, to []schema.Attr) bool + + // IndexPartAttrChanged reports if the part's attributes at position "i" + // were changed. For example, an index-part collation. + IndexPartAttrChanged(from, to *schema.Index, i int) bool + + // IsGeneratedIndexName reports if the index name was generated by the database + // for unnamed INDEX or UNIQUE constraints. In such cases, the Differ will look + // for unnamed schema.Indexes on the desired state, before tagging the index as + // a candidate for deletion. + IsGeneratedIndexName(*schema.Table, *schema.Index) bool + + // ReferenceChanged reports if the foreign key referential action was + // changed. For example, action was changed from RESTRICT to CASCADE. + ReferenceChanged(from, to schema.ReferenceOption) bool + } + + // A Normalizer wraps the Normalize method for normalizing the from and to tables before + // running diffing. The "from" usually represents the inspected database state (current), + // and the second represents the desired state. + // + // If the DiffDriver implements the Normalizer interface, TableDiff normalizes its table + // inputs before starting the diff process. + Normalizer interface { + Normalize(from, to *schema.Table) error + } + + // TableFinder wraps the FindTable method, providing more + // control to the DiffDriver on how tables are matched. + TableFinder interface { + FindTable(*schema.Schema, string) (*schema.Table, error) + } +) + +// RealmDiff implements the schema.Differ for Realm objects and returns a list of changes +// that need to be applied in order to move a database from the current state to the desired. +func (d *Diff) RealmDiff(from, to *schema.Realm) ([]schema.Change, error) { + var changes []schema.Change + // Drop or modify schema. + for _, s1 := range from.Schemas { + s2, ok := to.Schema(s1.Name) + if !ok { + changes = append(changes, &schema.DropSchema{S: s1}) + continue + } + change, err := d.SchemaDiff(s1, s2) + if err != nil { + return nil, err + } + changes = append(changes, change...) + } + // Add schemas. 
+ for _, s1 := range to.Schemas { + if _, ok := from.Schema(s1.Name); ok { + continue + } + changes = append(changes, &schema.AddSchema{S: s1}) + for _, t := range s1.Tables { + changes = append(changes, &schema.AddTable{T: t}) + } + } + return changes, nil +} + +// SchemaDiff implements the schema.Differ interface and returns a list of +// changes that need to be applied in order to move from one state to the other. +func (d *Diff) SchemaDiff(from, to *schema.Schema) ([]schema.Change, error) { + if from.Name != to.Name { + return nil, fmt.Errorf("mismatched schema names: %q != %q", from.Name, to.Name) + } + var changes []schema.Change + // Drop or modify attributes (collations, charset, etc). + if change := d.SchemaAttrDiff(from, to); len(change) > 0 { + changes = append(changes, &schema.ModifySchema{ + S: to, + Changes: change, + }) + } + + // Drop or modify tables. + for _, t1 := range from.Tables { + switch t2, err := d.findTable(to, t1.Name); { + case schema.IsNotExistError(err): + changes = append(changes, &schema.DropTable{T: t1}) + case err != nil: + return nil, err + default: + change, err := d.tableDiff(t1, t2) + if err != nil { + return nil, err + } + if len(change) > 0 { + changes = append(changes, &schema.ModifyTable{ + T: t2, + Changes: change, + }) + } + } + } + // Add tables. + for _, t1 := range to.Tables { + switch _, err := d.findTable(from, t1.Name); { + case schema.IsNotExistError(err): + changes = append(changes, &schema.AddTable{T: t1}) + case err != nil: + return nil, err + } + } + return changes, nil +} + +// TableDiff implements the schema.TableDiffer interface and returns a list of +// changes that need to be applied in order to move from one state to the other. +func (d *Diff) TableDiff(from, to *schema.Table) ([]schema.Change, error) { + if from.Name != to.Name { + return nil, fmt.Errorf("mismatched table names: %q != %q", from.Name, to.Name) + } + return d.tableDiff(from, to) +} + +// tableDiff implements the table diffing but skips the table name check. +func (d *Diff) tableDiff(from, to *schema.Table) ([]schema.Change, error) { + // tableDiff can be called with non-identical + // names without affecting the diff process. + if name := from.Name; name != to.Name { + from.Name = to.Name + defer func() { from.Name = name }() + } + // Normalizing tables before starting the diff process. + if n, ok := d.DiffDriver.(Normalizer); ok { + if err := n.Normalize(from, to); err != nil { + return nil, err + } + } + var changes []schema.Change + // Drop or modify attributes (collations, checks, etc). + change, err := d.TableAttrDiff(from, to) + if err != nil { + return nil, err + } + changes = append(changes, change...) + + // Drop or modify columns. + for _, c1 := range from.Columns { + c2, ok := to.Column(c1.Name) + if !ok { + changes = append(changes, &schema.DropColumn{C: c1}) + continue + } + change, err := d.ColumnChange(from, c1, c2) + if err != nil { + return nil, err + } + if change != schema.NoChange { + changes = append(changes, &schema.ModifyColumn{ + From: c1, + To: c2, + Change: change, + }) + } + } + // Add columns. + for _, c1 := range to.Columns { + if _, ok := from.Column(c1.Name); !ok { + changes = append(changes, &schema.AddColumn{C: c1}) + } + } + + // Primary-key and index changes. + changes = append(changes, d.pkDiff(from, to)...) + changes = append(changes, d.indexDiff(from, to)...) + + // Drop or modify foreign-keys. 
+ for _, fk1 := range from.ForeignKeys { + fk2, ok := to.ForeignKey(fk1.Symbol) + if !ok { + changes = append(changes, &schema.DropForeignKey{F: fk1}) + continue + } + if change := d.fkChange(fk1, fk2); change != schema.NoChange { + changes = append(changes, &schema.ModifyForeignKey{ + From: fk1, + To: fk2, + Change: change, + }) + } + } + // Add foreign-keys. + for _, fk1 := range to.ForeignKeys { + if _, ok := from.ForeignKey(fk1.Symbol); !ok { + changes = append(changes, &schema.AddForeignKey{F: fk1}) + } + } + return changes, nil +} + +// pkDiff returns the schema changes (if any) for migrating table +// primary-key from current state to the desired state. +func (d *Diff) pkDiff(from, to *schema.Table) (changes []schema.Change) { + switch pk1, pk2 := from.PrimaryKey, to.PrimaryKey; { + case pk1 == nil && pk2 != nil: + changes = append(changes, &schema.AddPrimaryKey{P: pk2}) + case pk1 != nil && pk2 == nil: + changes = append(changes, &schema.DropPrimaryKey{P: pk1}) + case pk1 != nil && pk2 != nil: + change := d.indexChange(pk1, pk2) + change &= ^schema.ChangeUnique + if change != schema.NoChange { + changes = append(changes, &schema.ModifyPrimaryKey{ + From: pk1, + To: pk2, + Change: change, + }) + } + } + return +} + +// indexDiff returns the schema changes (if any) for migrating table +// indexes from current state to the desired state. +func (d *Diff) indexDiff(from, to *schema.Table) []schema.Change { + var ( + changes []schema.Change + exists = make(map[*schema.Index]bool) + ) + // Drop or modify indexes. + for _, idx1 := range from.Indexes { + idx2, ok := to.Index(idx1.Name) + // Found directly. + if ok { + if change := d.indexChange(idx1, idx2); change != schema.NoChange { + changes = append(changes, &schema.ModifyIndex{ + From: idx1, + To: idx2, + Change: change, + }) + } + exists[idx2] = true + continue + } + // Found indirectly. + if d.IsGeneratedIndexName(from, idx1) { + if idx2, ok := d.similarUnnamedIndex(to, idx1); ok { + exists[idx2] = true + continue + } + } + // Not found. + changes = append(changes, &schema.DropIndex{I: idx1}) + } + // Add indexes. + for _, idx := range to.Indexes { + if exists[idx] { + continue + } + if _, ok := from.Index(idx.Name); !ok { + changes = append(changes, &schema.AddIndex{I: idx}) + } + } + return changes +} + +// indexChange returns the schema changes (if any) for migrating one index to the other. 
+func (d *Diff) indexChange(from, to *schema.Index) schema.ChangeKind {
+ var change schema.ChangeKind
+ if from.Unique != to.Unique {
+ change |= schema.ChangeUnique
+ }
+ if d.IndexAttrChanged(from.Attrs, to.Attrs) {
+ change |= schema.ChangeAttr
+ }
+ change |= d.partsChange(from, to)
+ change |= CommentChange(from.Attrs, to.Attrs)
+ return change
+}
+
+func (d *Diff) partsChange(fromI, toI *schema.Index) schema.ChangeKind {
+ from, to := fromI.Parts, toI.Parts
+ if len(from) != len(to) {
+ return schema.ChangeParts
+ }
+ sort.Slice(to, func(i, j int) bool { return to[i].SeqNo < to[j].SeqNo })
+ sort.Slice(from, func(i, j int) bool { return from[i].SeqNo < from[j].SeqNo })
+ for i := range from {
+ switch {
+ case from[i].Desc != to[i].Desc || d.IndexPartAttrChanged(fromI, toI, i):
+ return schema.ChangeParts
+ case from[i].C != nil && to[i].C != nil:
+ if from[i].C.Name != to[i].C.Name {
+ return schema.ChangeParts
+ }
+ case from[i].X != nil && to[i].X != nil:
+ x1, x2 := from[i].X.(*schema.RawExpr).X, to[i].X.(*schema.RawExpr).X
+ if x1 != x2 && x1 != MayWrap(x2) {
+ return schema.ChangeParts
+ }
+ default: // (C1 != nil) != (C2 != nil) || (X1 != nil) != (X2 != nil).
+ return schema.ChangeParts
+ }
+ }
+ return schema.NoChange
+}
+
+// fkChange returns the schema changes (if any) for migrating one foreign-key to the other.
+func (d *Diff) fkChange(from, to *schema.ForeignKey) schema.ChangeKind {
+ var change schema.ChangeKind
+ switch {
+ case from.Table.Name != to.Table.Name:
+ change |= schema.ChangeRefTable | schema.ChangeRefColumn
+ case len(from.RefColumns) != len(to.RefColumns):
+ change |= schema.ChangeRefColumn
+ default:
+ for i := range from.RefColumns {
+ if from.RefColumns[i].Name != to.RefColumns[i].Name {
+ change |= schema.ChangeRefColumn
+ }
+ }
+ }
+ switch {
+ case len(from.Columns) != len(to.Columns):
+ change |= schema.ChangeColumn
+ default:
+ for i := range from.Columns {
+ if from.Columns[i].Name != to.Columns[i].Name {
+ change |= schema.ChangeColumn
+ }
+ }
+ }
+ if d.ReferenceChanged(from.OnUpdate, to.OnUpdate) {
+ change |= schema.ChangeUpdateAction
+ }
+ if d.ReferenceChanged(from.OnDelete, to.OnDelete) {
+ change |= schema.ChangeDeleteAction
+ }
+ return change
+}
+
+// similarUnnamedIndex searches for an unnamed index with the same index-parts in the table.
+func (d *Diff) similarUnnamedIndex(t *schema.Table, idx1 *schema.Index) (*schema.Index, bool) {
+ for _, idx2 := range t.Indexes {
+ if idx2.Name != "" || len(idx2.Parts) != len(idx1.Parts) || idx2.Unique != idx1.Unique {
+ continue
+ }
+ if d.partsChange(idx1, idx2) == schema.NoChange {
+ return idx2, true
+ }
+ }
+ return nil, false
+}
+
+func (d *Diff) findTable(s *schema.Schema, name string) (*schema.Table, error) {
+ if f, ok := d.DiffDriver.(TableFinder); ok {
+ return f.FindTable(s, name)
+ }
+ t, ok := s.Table(name)
+ if !ok {
+ return nil, &schema.NotExistError{Err: fmt.Errorf("table %q was not found", name)}
+ }
+ return t, nil
+}
+
+// CommentChange reports if the element comment was changed.
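+//
+// A minimal usage sketch (editor's illustration, not part of the upstream docs):
+//
+//	from := []schema.Attr{&schema.Comment{Text: "users"}}
+//	to := []schema.Attr{&schema.Comment{Text: "accounts"}}
+//	_ = CommentChange(from, to) // schema.ChangeComment, as the text differs.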
+func CommentChange(from, to []schema.Attr) schema.ChangeKind {
+ var c1, c2 schema.Comment
+ if Has(from, &c1) != Has(to, &c2) || c1.Text != c2.Text {
+ return schema.ChangeComment
+ }
+ return schema.NoChange
+}
+
+var (
+ attrsType = reflect.TypeOf(([]schema.Attr)(nil))
+ clausesType = reflect.TypeOf(([]schema.Clause)(nil))
+ exprsType = reflect.TypeOf(([]schema.Expr)(nil))
+)
+
+// Has finds the first element in the elements list that
+// matches target, and if so, sets target to that attribute
+// value and returns true.
+func Has(elements, target any) bool {
+ ev := reflect.ValueOf(elements)
+ if t := ev.Type(); t != attrsType && t != clausesType && t != exprsType {
+ panic(fmt.Sprintf("unexpected elements type: %T", elements))
+ }
+ tv := reflect.ValueOf(target)
+ if tv.Kind() != reflect.Ptr || tv.IsNil() {
+ panic("target must be a non-nil pointer")
+ }
+ for i := 0; i < ev.Len(); i++ {
+ idx := ev.Index(i)
+ if idx.IsNil() {
+ continue
+ }
+ if e := idx.Elem(); e.Type().AssignableTo(tv.Type()) {
+ tv.Elem().Set(e.Elem())
+ return true
+ }
+ }
+ return false
+}
+
+// UnsupportedTypeError describes an unsupported type error.
+type UnsupportedTypeError struct {
+ schema.Type
+}
+
+func (e UnsupportedTypeError) Error() string {
+ return fmt.Sprintf("unsupported type %T", e.Type)
+}
+
+// CommentDiff computes the comment diff between the two attribute lists.
+// Note that the implementation relies on the fact that both PostgreSQL
+// and MySQL treat empty comment as "no comment" and a way to clear comments.
+func CommentDiff(from, to []schema.Attr) schema.Change {
+ var fromC, toC schema.Comment
+ switch fromHas, toHas := Has(from, &fromC), Has(to, &toC); {
+ case !fromHas && !toHas:
+ case !fromHas && toC.Text != "":
+ return &schema.AddAttr{
+ A: &toC,
+ }
+ case !toHas:
+ // In MySQL, there is no way to DROP a comment. Instead, setting it to empty ('')
+ // will remove it from INFORMATION_SCHEMA. We use the same approach in PostgreSQL,
+ // because comments can be dropped either by setting them to NULL or empty string.
+ // See: postgres/backend/commands/comment.c#CreateComments.
+ return &schema.ModifyAttr{
+ From: &fromC,
+ To: &toC,
+ }
+ default:
+ v1, err1 := Unquote(fromC.Text)
+ v2, err2 := Unquote(toC.Text)
+ if err1 == nil && err2 == nil && v1 != v2 {
+ return &schema.ModifyAttr{
+ From: &fromC,
+ To: &toC,
+ }
+ }
+ }
+ return nil
+}
+
+// CheckDiff computes the change diff between the two tables. A compare
+// function is provided to check if a Check object was modified.
+func CheckDiff(from, to *schema.Table, compare ...func(c1, c2 *schema.Check) bool) []schema.Change {
+ var changes []schema.Change
+ // Drop or modify checks.
+ for _, c1 := range checks(from.Attrs) {
+ switch c2, ok := similarCheck(to.Attrs, c1); {
+ case !ok:
+ changes = append(changes, &schema.DropCheck{
+ C: c1,
+ })
+ case len(compare) == 1 && !compare[0](c1, c2):
+ changes = append(changes, &schema.ModifyCheck{
+ From: c1,
+ To: c2,
+ })
+ }
+ }
+ // Add checks.
+ for _, c1 := range checks(to.Attrs) {
+ if _, ok := similarCheck(from.Attrs, c1); !ok {
+ changes = append(changes, &schema.AddCheck{
+ C: c1,
+ })
+ }
+ }
+ return changes
+}
+
+// checks extracts all constraints from table attributes.
+func checks(attr []schema.Attr) (checks []*schema.Check) {
+ for i := range attr {
+ if c, ok := attr[i].(*schema.Check); ok {
+ checks = append(checks, c)
+ }
+ }
+ return checks
+}
+
+// similarCheck returns a CHECK by its constraint name or expression.
+func similarCheck(attrs []schema.Attr, c *schema.Check) (*schema.Check, bool) { + var byName, byExpr *schema.Check + for i := 0; i < len(attrs) && (byName == nil || byExpr == nil); i++ { + check, ok := attrs[i].(*schema.Check) + if !ok { + continue + } + if check.Name != "" && check.Name == c.Name { + byName = check + } + if check.Expr == c.Expr { + byExpr = check + } + } + // Give precedence to constraint name. + if byName != nil { + return byName, true + } + if byExpr != nil { + return byExpr, true + } + return nil, false +} + +// Unquote single or double quotes. +func Unquote(s string) (string, error) { + switch { + case IsQuoted(s, '"'): + return strconv.Unquote(s) + case IsQuoted(s, '\''): + return strings.ReplaceAll(s[1:len(s)-1], "''", "'"), nil + default: + return s, nil + } +} + +// SingleQuote quotes the given string with single quote. +func SingleQuote(s string) (string, error) { + switch { + case IsQuoted(s, '\''): + return s, nil + case IsQuoted(s, '"'): + v, err := strconv.Unquote(s) + if err != nil { + return "", err + } + s = v + fallthrough + default: + return "'" + strings.ReplaceAll(s, "'", "''") + "'", nil + } +} diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/exclude.go b/vendor/ariga.io/atlas/sql/internal/sqlx/exclude.go new file mode 100644 index 00000000..d3c07547 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/sqlx/exclude.go @@ -0,0 +1,169 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlx + +import ( + "encoding/csv" + "fmt" + "path/filepath" + "strings" + + "ariga.io/atlas/sql/schema" +) + +// ExcludeRealm filters resources in the realm based on the given patterns. +func ExcludeRealm(r *schema.Realm, patterns []string) (*schema.Realm, error) { + if len(patterns) == 0 { + return r, nil + } + var schemas []*schema.Schema + globs, err := split(patterns) + if err != nil { + return nil, err + } +Filter: + for _, s := range r.Schemas { + for i, g := range globs { + if len(g) > 3 { + return nil, fmt.Errorf("too many parts in pattern: %q", patterns[i]) + } + match, err := filepath.Match(g[0], s.Name) + if err != nil { + return nil, err + } + if match { + // In case there is a match, and it is + // a single glob we exclude this schema. + if len(g) == 1 { + continue Filter + } + if err := excludeS(s, g[1:]); err != nil { + return nil, err + } + } + } + schemas = append(schemas, s) + } + r.Schemas = schemas + return r, nil +} + +// ExcludeSchema filters resources in the schema based on the given patterns. +func ExcludeSchema(s *schema.Schema, patterns []string) (*schema.Schema, error) { + if len(patterns) == 0 { + return s, nil + } + if s.Realm == nil { + return nil, fmt.Errorf("missing realm for schema %q", s.Name) + } + for i, p := range patterns { + patterns[i] = fmt.Sprintf("%s.%s", s.Name, p) + } + if _, err := ExcludeRealm(s.Realm, patterns); err != nil { + return nil, err + } + return s, nil +} + +// split parses the list of patterns into chain of resource-globs. +// For example, 's*.t.*' is split to ['s*', 't', *]. +func split(patterns []string) ([][]string, error) { + globs := make([][]string, len(patterns)) + for i, p := range patterns { + r := csv.NewReader(strings.NewReader(p)) + r.Comma = '.' 
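+ // Reading the pattern as a '.'-separated CSV record
+ // allows quoted pattern parts to contain dots.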
+ switch parts, err := r.ReadAll(); {
+ case err != nil:
+ return nil, err
+ case len(parts) != 1:
+ return nil, fmt.Errorf("unexpected pattern: %q", p)
+ case len(parts[0]) == 0:
+ return nil, fmt.Errorf("empty pattern: %q", p)
+ default:
+ globs[i] = parts[0]
+ }
+ }
+ return globs, nil
+}
+
+func excludeS(s *schema.Schema, glob []string) error {
+ var tables []*schema.Table
+ for _, t := range s.Tables {
+ match, err := filepath.Match(glob[0], t.Name)
+ if err != nil {
+ return err
+ }
+ if match {
+ // In case there is a match, and it is
+ // a single glob we exclude this table.
+ if len(glob) == 1 {
+ continue
+ }
+ if err := excludeT(t, glob[1]); err != nil {
+ return err
+ }
+ }
+ // No match or glob has more than one pattern.
+ tables = append(tables, t)
+ }
+ s.Tables = tables
+ return nil
+}
+
+func excludeT(t *schema.Table, pattern string) (err error) {
+ ex := make(map[*schema.Index]struct{})
+ ef := make(map[*schema.ForeignKey]struct{})
+ t.Columns, err = filter(t.Columns, func(c *schema.Column) (bool, error) {
+ match, err := filepath.Match(pattern, c.Name)
+ if !match || err != nil {
+ return false, err
+ }
+ for _, idx := range c.Indexes {
+ ex[idx] = struct{}{}
+ }
+ for _, fk := range c.ForeignKeys {
+ ef[fk] = struct{}{}
+ }
+ return true, nil
+ })
+ t.Indexes, err = filter(t.Indexes, func(idx *schema.Index) (bool, error) {
+ if _, ok := ex[idx]; ok {
+ return true, nil
+ }
+ return filepath.Match(pattern, idx.Name)
+ })
+ t.ForeignKeys, err = filter(t.ForeignKeys, func(fk *schema.ForeignKey) (bool, error) {
+ if _, ok := ef[fk]; ok {
+ return true, nil
+ }
+ return filepath.Match(pattern, fk.Symbol)
+ })
+ t.Attrs, err = filter(t.Attrs, func(a schema.Attr) (bool, error) {
+ c, ok := a.(*schema.Check)
+ if !ok {
+ return false, nil
+ }
+ match, err := filepath.Match(pattern, c.Name)
+ if !match || err != nil {
+ return false, err
+ }
+ return true, nil
+ })
+ return
+}
+
+func filter[T any](s []T, f func(T) (bool, error)) ([]T, error) {
+ r := make([]T, 0, len(s))
+ for i := range s {
+ match, err := f(s[i])
+ if err != nil {
+ return nil, err
+ }
+ if !match {
+ r = append(r, s[i])
+ }
+ }
+ return r, nil
+}
diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/plan.go b/vendor/ariga.io/atlas/sql/internal/sqlx/plan.go
new file mode 100644
index 00000000..33f9038f
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/internal/sqlx/plan.go
@@ -0,0 +1,344 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "sort"
+
+ "ariga.io/atlas/sql/migrate"
+ "ariga.io/atlas/sql/schema"
+)
+
+type (
+ execPlanner interface {
+ ExecContext(context.Context, string, ...any) (sql.Result, error)
+ PlanChanges(context.Context, string, []schema.Change, ...migrate.PlanOption) (*migrate.Plan, error)
+ }
+ // ApplyError is an error that exposes information about how many
+ // changes were applied before the failure was encountered.
+ ApplyError struct {
+ err string
+ applied int
+ }
+)
+
+// Applied reports how many changes were applied before getting an error.
+// If the first change failed, Applied() returns 0.
+func (e *ApplyError) Applied() int {
+ return e.applied
+}
+
+// Error implements the error interface.
+func (e *ApplyError) Error() string {
+ return e.err
+}
+
+// ApplyChanges is a helper used by the different drivers to apply changes.
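+//
+// A hedged usage sketch (editor's illustration; drv stands for any value that
+// implements both PlanChanges and ExecContext):
+//
+//	if err := ApplyChanges(ctx, changes, drv); err != nil {
+//		var ae *ApplyError
+//		if errors.As(err, &ae) {
+//			fmt.Printf("failed after %d applied changes: %v", ae.Applied(), err)
+//		}
+//	}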
+func ApplyChanges(ctx context.Context, changes []schema.Change, p execPlanner, opts ...migrate.PlanOption) error {
+ plan, err := p.PlanChanges(ctx, "apply", changes, opts...)
+ if err != nil {
+ return err
+ }
+ for i, c := range plan.Changes {
+ if _, err := p.ExecContext(ctx, c.Cmd, c.Args...); err != nil {
+ if c.Comment != "" {
+ err = fmt.Errorf("%s: %w", c.Comment, err)
+ }
+ return &ApplyError{err: err.Error(), applied: i}
+ }
+ }
+ return nil
+}
+
+// noRows implements the schema.ExecQuerier for migrate.Drivers without connections.
+// This can be useful to always return no rows for queries, and block any execution.
+type noRows struct{}
+
+// QueryContext implements the sqlx.ExecQuerier interface.
+func (*noRows) QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) {
+ return nil, sql.ErrNoRows
+}
+
+// ExecContext implements the sqlx.ExecQuerier interface.
+func (*noRows) ExecContext(context.Context, string, ...interface{}) (sql.Result, error) {
+ return nil, errors.New("cannot execute statements without a database connection. use Open to create a new Driver")
+}
+
+// NoRows to be used by differs and planners without a connection.
+var NoRows schema.ExecQuerier = (*noRows)(nil)
+
+// SetReversible sets the Reversible field to
+// true if all planned changes are reversible.
+func SetReversible(p *migrate.Plan) error {
+ reversible := true
+ for _, c := range p.Changes {
+ stmts, err := c.ReverseStmts()
+ if err != nil {
+ return err
+ }
+ if len(stmts) == 0 {
+ reversible = false
+ }
+ }
+ p.Reversible = reversible
+ return nil
+}
+
+// DetachCycles takes a list of schema changes, and detaches
+// references between changes if there is at least one circular
+// reference in the changeset. More explicitly, it postpones foreign-key
+// creation, or drops foreign keys before their tables are dropped.
+func DetachCycles(changes []schema.Change) ([]schema.Change, error) {
+ sorted, err := sortMap(changes)
+ if err == errCycle {
+ return detachReferences(changes), nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ planned := make([]schema.Change, len(changes))
+ copy(planned, changes)
+ sort.Slice(planned, func(i, j int) bool {
+ return sorted[table(planned[i])] < sorted[table(planned[j])]
+ })
+ return planned, nil
+}
+
+// detachReferences detaches all table references.
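+// For example, an AddTable holding foreign keys to other tables is planned as
+// an AddTable without those keys, followed by a deferred ModifyTable that adds them.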
+func detachReferences(changes []schema.Change) []schema.Change {
+ var planned, deferred []schema.Change
+ for _, change := range changes {
+ switch change := change.(type) {
+ case *schema.AddTable:
+ var (
+ ext []schema.Change
+ self []*schema.ForeignKey
+ )
+ for _, fk := range change.T.ForeignKeys {
+ if fk.RefTable == change.T {
+ self = append(self, fk)
+ } else {
+ ext = append(ext, &schema.AddForeignKey{F: fk})
+ }
+ }
+ if len(ext) > 0 {
+ deferred = append(deferred, &schema.ModifyTable{T: change.T, Changes: ext})
+ t := *change.T
+ t.ForeignKeys = self
+ change = &schema.AddTable{T: &t, Extra: change.Extra}
+ }
+ planned = append(planned, change)
+ case *schema.DropTable:
+ var fks []schema.Change
+ for _, fk := range change.T.ForeignKeys {
+ if fk.RefTable != change.T {
+ fks = append(fks, &schema.DropForeignKey{F: fk})
+ }
+ }
+ if len(fks) > 0 {
+ planned = append(planned, &schema.ModifyTable{T: change.T, Changes: fks})
+ t := *change.T
+ t.ForeignKeys = nil
+ change = &schema.DropTable{T: &t, Extra: change.Extra}
+ }
+ deferred = append(deferred, change)
+ case *schema.ModifyTable:
+ var fks, rest []schema.Change
+ for _, c := range change.Changes {
+ switch c := c.(type) {
+ case *schema.AddForeignKey:
+ fks = append(fks, c)
+ default:
+ rest = append(rest, c)
+ }
+ }
+ if len(fks) > 0 {
+ deferred = append(deferred, &schema.ModifyTable{T: change.T, Changes: fks})
+ }
+ if len(rest) > 0 {
+ planned = append(planned, &schema.ModifyTable{T: change.T, Changes: rest})
+ }
+ default:
+ planned = append(planned, change)
+ }
+ }
+ return append(planned, deferred...)
+}
+
+// errCycle is an internal error to indicate a case of a cycle.
+var errCycle = errors.New("cycle detected")
+
+// sortMap returns an index-map that indicates the position of each table in a
+// topological sort in reversed order based on its references, and errCycle if
+// a cycle that is not a self-reference was detected.
+func sortMap(changes []schema.Change) (map[string]int, error) {
+ var (
+ visit func(string) bool
+ sorted = make(map[string]int)
+ progress = make(map[string]bool)
+ deps, err = dependencies(changes)
+ )
+ if err != nil {
+ return nil, err
+ }
+ visit = func(name string) bool {
+ if _, done := sorted[name]; done {
+ return false
+ }
+ if progress[name] {
+ return true
+ }
+ progress[name] = true
+ for _, ref := range deps[name] {
+ if visit(ref.Name) {
+ return true
+ }
+ }
+ delete(progress, name)
+ sorted[name] = len(sorted)
+ return false
+ }
+ for node := range deps {
+ if visit(node) {
+ return nil, errCycle
+ }
+ }
+ return sorted, nil
+}
+
+// dependencies returns an adjacency list of all tables and the tables they depend on.
+func dependencies(changes []schema.Change) (map[string][]*schema.Table, error) {
+ deps := make(map[string][]*schema.Table)
+ for _, change := range changes {
+ switch change := change.(type) {
+ case *schema.AddTable:
+ for _, fk := range change.T.ForeignKeys {
+ if err := checkFK(fk); err != nil {
+ return nil, err
+ }
+ if fk.RefTable != change.T {
+ deps[change.T.Name] = append(deps[change.T.Name], fk.RefTable)
+ }
+ }
+ case *schema.DropTable:
+ for _, fk := range change.T.ForeignKeys {
+ if err := checkFK(fk); err != nil {
+ return nil, err
+ }
+ if isDropped(changes, fk.RefTable) {
+ deps[fk.RefTable.Name] = append(deps[fk.RefTable.Name], fk.Table)
+ }
+ }
+ case *schema.ModifyTable:
+ for _, c := range change.Changes {
+ switch c := c.(type) {
+ case *schema.AddForeignKey:
+ if err := checkFK(c.F); err != nil {
+ return nil, err
+ }
+ if c.F.RefTable != change.T {
+ deps[change.T.Name] = append(deps[change.T.Name], c.F.RefTable)
+ }
+ case *schema.ModifyForeignKey:
+ if err := checkFK(c.To); err != nil {
+ return nil, err
+ }
+ if c.To.RefTable != change.T {
+ deps[change.T.Name] = append(deps[change.T.Name], c.To.RefTable)
+ }
+ case *schema.DropForeignKey:
+ if err := checkFK(c.F); err != nil {
+ return nil, err
+ }
+ if isDropped(changes, c.F.RefTable) {
+ deps[c.F.RefTable.Name] = append(deps[c.F.RefTable.Name], c.F.Table)
+ }
+ }
+ }
+ }
+ }
+ return deps, nil
+}
+
+func checkFK(fk *schema.ForeignKey) error {
+ var cause []string
+ if fk.Table == nil {
+ cause = append(cause, "child table")
+ }
+ if len(fk.Columns) == 0 {
+ cause = append(cause, "child columns")
+ }
+ if fk.RefTable == nil {
+ cause = append(cause, "parent table")
+ }
+ if len(fk.RefColumns) == 0 {
+ cause = append(cause, "parent columns")
+ }
+ if len(cause) != 0 {
+ return fmt.Errorf("missing %q for foreign key: %q", cause, fk.Symbol)
+ }
+ return nil
+}
+
+// table extracts the table name from the given change.
+func table(change schema.Change) (t string) {
+ switch change := change.(type) {
+ case *schema.AddTable:
+ t = change.T.Name
+ case *schema.DropTable:
+ t = change.T.Name
+ case *schema.ModifyTable:
+ t = change.T.Name
+ }
+ return
+}
+
+// isDropped checks if the given table is marked as deleted in the changeset.
+func isDropped(changes []schema.Change, t *schema.Table) bool {
+ for _, c := range changes {
+ if c, ok := c.(*schema.DropTable); ok && c.T.Name == t.Name {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckChangesScope checks that changes can be applied
+// on a schema scope (connection).
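+//
+// A minimal sketch (editor's illustration): schema-level changes such as
+// schema.AddSchema are rejected, and so are plans whose tables (or enum
+// columns) span more than one named schema.
+//
+//	if err := CheckChangesScope(changes); err != nil {
+//		return err // plan cannot run on a schema-bound connection
+//	}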
+func CheckChangesScope(changes []schema.Change) error { + names := make(map[string]struct{}) + for _, c := range changes { + var t *schema.Table + switch c := c.(type) { + case *schema.AddSchema, *schema.ModifySchema, *schema.DropSchema: + return fmt.Errorf("%T is not allowed when migration plan is scoped to one schema", c) + case *schema.AddTable: + t = c.T + case *schema.ModifyTable: + t = c.T + case *schema.DropTable: + t = c.T + default: + continue + } + if t.Schema != nil && t.Schema.Name != "" { + names[t.Schema.Name] = struct{}{} + } + for _, c := range t.Columns { + e, ok := c.Type.Type.(*schema.EnumType) + if ok && e.Schema != nil && e.Schema.Name != "" { + names[t.Schema.Name] = struct{}{} + } + } + } + if len(names) > 1 { + return fmt.Errorf("found %d schemas when migration plan is scoped to one", len(names)) + } + return nil +} diff --git a/vendor/ariga.io/atlas/sql/internal/sqlx/sqlx.go b/vendor/ariga.io/atlas/sql/internal/sqlx/sqlx.go new file mode 100644 index 00000000..6ecc7c37 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/internal/sqlx/sqlx.go @@ -0,0 +1,530 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlx + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "ariga.io/atlas/sql/schema" +) + +type ( + // ExecQueryCloser is the interface that groups + // Close with the schema.ExecQuerier methods. + ExecQueryCloser interface { + schema.ExecQuerier + io.Closer + } + nopCloser struct { + schema.ExecQuerier + } +) + +// Close implements the io.Closer interface. +func (nopCloser) Close() error { return nil } + +// SingleConn returns a closable single connection from the given ExecQuerier. +// If the ExecQuerier is already bound to a single connection (e.g. Tx, Conn), +// the connection will return as-is with a NopCloser. +func SingleConn(ctx context.Context, conn schema.ExecQuerier) (ExecQueryCloser, error) { + // A standard sql.DB or a wrapper of it. + if opener, ok := conn.(interface { + Conn(context.Context) (*sql.Conn, error) + }); ok { + return opener.Conn(ctx) + } + // Tx and Conn are bounded to a single connection. + // We use sql/driver.Tx to cover also custom Tx structs. + _, ok1 := conn.(driver.Tx) + _, ok2 := conn.(*sql.Conn) + if ok1 || ok2 { + return nopCloser{ExecQuerier: conn}, nil + } + return nil, fmt.Errorf("cannot obtain a single connection from %T", conn) +} + +// ValidString reports if the given string is not null and valid. +func ValidString(s sql.NullString) bool { + return s.Valid && s.String != "" && strings.ToLower(s.String) != "null" +} + +// ScanOne scans one record and closes the rows at the end. +func ScanOne(rows *sql.Rows, dest ...any) error { + defer rows.Close() + if !rows.Next() { + return sql.ErrNoRows + } + if err := rows.Scan(dest...); err != nil { + return err + } + return rows.Close() +} + +// ScanNullBool scans one sql.NullBool record and closes the rows at the end. +func ScanNullBool(rows *sql.Rows) (sql.NullBool, error) { + var b sql.NullBool + return b, ScanOne(rows, &b) +} + +// ScanStrings scans sql.Rows into a slice of strings and closes it at the end. 
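+//
+// A hedged usage sketch (editor's illustration; the query is hypothetical):
+//
+//	rows, err := db.QueryContext(ctx, "SELECT schema_name FROM information_schema.schemata")
+//	if err != nil {
+//		return err
+//	}
+//	names, err := ScanStrings(rows)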
+func ScanStrings(rows *sql.Rows) ([]string, error) { + defer rows.Close() + var vs []string + for rows.Next() { + var v string + if err := rows.Scan(&v); err != nil { + return nil, err + } + vs = append(vs, v) + } + return vs, nil +} + +// SchemaFKs scans the rows and adds the foreign-key to the schema table. +// Reference elements are added as stubs and should be linked manually by the +// caller. +func SchemaFKs(s *schema.Schema, rows *sql.Rows) error { + for rows.Next() { + var name, table, column, tSchema, refTable, refColumn, refSchema, updateRule, deleteRule string + if err := rows.Scan(&name, &table, &column, &tSchema, &refTable, &refColumn, &refSchema, &updateRule, &deleteRule); err != nil { + return err + } + t, ok := s.Table(table) + if !ok { + return fmt.Errorf("table %q was not found in schema", table) + } + fk, ok := t.ForeignKey(name) + if !ok { + fk = &schema.ForeignKey{ + Symbol: name, + Table: t, + RefTable: t, + OnDelete: schema.ReferenceOption(deleteRule), + OnUpdate: schema.ReferenceOption(updateRule), + } + switch { + case refTable == table: + case tSchema == refSchema: + if fk.RefTable, ok = s.Table(refTable); !ok { + fk.RefTable = &schema.Table{Name: refTable, Schema: s} + } + case tSchema != refSchema: + fk.RefTable = &schema.Table{Name: refTable, Schema: &schema.Schema{Name: refSchema}} + } + t.ForeignKeys = append(t.ForeignKeys, fk) + } + c, ok := t.Column(column) + if !ok { + return fmt.Errorf("column %q was not found for fk %q", column, fk.Symbol) + } + // Rows are ordered by ORDINAL_POSITION that specifies + // the position of the column in the FK definition. + if _, ok := fk.Column(c.Name); !ok { + fk.Columns = append(fk.Columns, c) + c.ForeignKeys = append(c.ForeignKeys, fk) + } + // Stub referenced columns or link if it's a self-reference. + var rc *schema.Column + if fk.Table != fk.RefTable { + rc = &schema.Column{Name: refColumn} + } else if c, ok := t.Column(refColumn); ok { + rc = c + } else { + return fmt.Errorf("referenced column %q was not found for fk %q", refColumn, fk.Symbol) + } + if _, ok := fk.RefColumn(rc.Name); !ok { + fk.RefColumns = append(fk.RefColumns, rc) + } + } + return nil +} + +// LinkSchemaTables links foreign-key stub tables/columns to actual elements. +func LinkSchemaTables(schemas []*schema.Schema) { + byName := make(map[string]map[string]*schema.Table) + for _, s := range schemas { + byName[s.Name] = make(map[string]*schema.Table) + for _, t := range s.Tables { + t.Schema = s + byName[s.Name][t.Name] = t + } + } + for _, s := range schemas { + for _, t := range s.Tables { + for _, fk := range t.ForeignKeys { + rs, ok := byName[fk.RefTable.Schema.Name] + if !ok { + continue + } + ref, ok := rs[fk.RefTable.Name] + if !ok { + continue + } + fk.RefTable = ref + for i, c := range fk.RefColumns { + rc, ok := ref.Column(c.Name) + if ok { + fk.RefColumns[i] = rc + } + } + } + } + } +} + +// ValuesEqual checks if the 2 string slices are equal (including their order). +func ValuesEqual(v1, v2 []string) bool { + if len(v1) != len(v2) { + return false + } + for i := range v1 { + if v1[i] != v2[i] { + return false + } + } + return true +} + +// ModeInspectSchema returns the InspectMode or its default. +func ModeInspectSchema(o *schema.InspectOptions) schema.InspectMode { + if o == nil || o.Mode == 0 { + return schema.InspectSchemas | schema.InspectTables + } + return o.Mode +} + +// ModeInspectRealm returns the InspectMode or its default. 
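+//
+//	// Editor's note: a nil option falls back to inspecting schemas and tables.
+//	_ = ModeInspectRealm(nil) // schema.InspectSchemas | schema.InspectTables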
+func ModeInspectRealm(o *schema.InspectRealmOption) schema.InspectMode { + if o == nil || o.Mode == 0 { + return schema.InspectSchemas | schema.InspectTables + } + return o.Mode +} + +// A Builder provides a syntactic sugar API for writing SQL statements. +type Builder struct { + bytes.Buffer + QuoteChar byte // quoting identifiers + Schema *string // schema qualifier + Indent string // indentation string + level int // current indentation level +} + +// P writes a list of phrases to the builder separated and +// suffixed with whitespace. +func (b *Builder) P(phrases ...string) *Builder { + for _, p := range phrases { + if p == "" { + continue + } + if b.Len() > 0 && b.lastByte() != ' ' && b.lastByte() != '(' { + b.WriteByte(' ') + } + b.WriteString(p) + if p[len(p)-1] != ' ' { + b.WriteByte(' ') + } + } + return b +} + +// Ident writes the given string quoted as an SQL identifier. +func (b *Builder) Ident(s string) *Builder { + if s != "" { + b.WriteByte(b.QuoteChar) + b.WriteString(s) + b.WriteByte(b.QuoteChar) + b.WriteByte(' ') + } + return b +} + +// Table writes the table identifier to the builder, prefixed +// with the schema name if exists. +func (b *Builder) Table(t *schema.Table) *Builder { + switch { + // Custom qualifier. + case b.Schema != nil: + // Empty means skip prefix. + if *b.Schema != "" { + b.Ident(*b.Schema) + b.rewriteLastByte('.') + } + // Default schema qualifier. + case t.Schema != nil && t.Schema.Name != "": + b.Ident(t.Schema.Name) + b.rewriteLastByte('.') + } + b.Ident(t.Name) + return b +} + +// IndentIn adds one indentation in. +func (b *Builder) IndentIn() *Builder { + b.level++ + return b +} + +// IndentOut removed one indentation level. +func (b *Builder) IndentOut() *Builder { + b.level-- + return b +} + +// NL adds line break and prefix the new line with +// indentation in case indentation is enabled. +func (b *Builder) NL() *Builder { + if b.Indent != "" { + if b.lastByte() == ' ' { + b.rewriteLastByte('\n') + } else { + b.WriteByte('\n') + } + b.WriteString(strings.Repeat(b.Indent, b.level)) + } + return b +} + +// Comma writes a comma in case the buffer is not empty, or +// replaces the last char if it is a whitespace. +func (b *Builder) Comma() *Builder { + switch { + case b.Len() == 0: + case b.lastByte() == ' ': + b.rewriteLastByte(',') + b.WriteByte(' ') + default: + b.WriteString(", ") + } + return b +} + +// MapComma maps the slice x using the function f and joins the result with +// a comma separating between the written elements. +func (b *Builder) MapComma(x any, f func(i int, b *Builder)) *Builder { + s := reflect.ValueOf(x) + for i := 0; i < s.Len(); i++ { + if i > 0 { + b.Comma() + } + f(i, b) + } + return b +} + +// MapIndent is like MapComma, but writes a new line before each element. +func (b *Builder) MapIndent(x any, f func(i int, b *Builder)) *Builder { + return b.MapComma(x, func(i int, b *Builder) { + f(i, b.NL()) + }) +} + +// MapCommaErr is like MapComma, but returns an error if f returns an error. +func (b *Builder) MapCommaErr(x any, f func(i int, b *Builder) error) error { + s := reflect.ValueOf(x) + for i := 0; i < s.Len(); i++ { + if i > 0 { + b.Comma() + } + if err := f(i, b); err != nil { + return err + } + } + return nil +} + +// MapIndentErr is like MapCommaErr, but writes a new line before each element. 
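+//
+// A hedged Builder sketch (editor's illustration; cols is a hypothetical
+// []*schema.Column slice):
+//
+//	b := &Builder{QuoteChar: '"', Indent: "  "}
+//	b.P("CREATE TABLE").Ident("users")
+//	err := b.MapIndentErr(cols, func(i int, b *Builder) error {
+//		b.Ident(cols[i].Name).P("TEXT")
+//		return nil
+//	})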
+func (b *Builder) MapIndentErr(x any, f func(i int, b *Builder) error) error { + return b.MapCommaErr(x, func(i int, b *Builder) error { + return f(i, b.NL()) + }) +} + +// Wrap wraps the written string with parentheses. +func (b *Builder) Wrap(f func(b *Builder)) *Builder { + b.WriteByte('(') + f(b) + if b.lastByte() != ' ' { + b.WriteByte(')') + } else { + b.rewriteLastByte(')') + } + return b +} + +// WrapIndent is like Wrap but with extra level of indentation. +func (b *Builder) WrapIndent(f func(b *Builder)) *Builder { + return b.Wrap(func(b *Builder) { + b.IndentIn() + f(b) + b.IndentOut() + b.NL() + }) +} + +// Clone returns a duplicate of the builder. +func (b *Builder) Clone() *Builder { + return &Builder{ + QuoteChar: b.QuoteChar, + Buffer: *bytes.NewBufferString(b.Buffer.String()), + } +} + +// String overrides the Buffer.String method and ensure no spaces pad the returned statement. +func (b *Builder) String() string { + return strings.TrimSpace(b.Buffer.String()) +} + +func (b *Builder) lastByte() byte { + if b.Len() == 0 { + return 0 + } + buf := b.Buffer.Bytes() + return buf[len(buf)-1] +} + +func (b *Builder) rewriteLastByte(c byte) { + if b.Len() == 0 { + return + } + buf := b.Buffer.Bytes() + buf[len(buf)-1] = c +} + +// IsQuoted reports if the given string is quoted with one of the given quotes (e.g. ', ", `). +func IsQuoted(s string, q ...byte) bool { + last := len(s) - 1 + if last < 1 { + return false + } +Top: + for _, quote := range q { + if s[0] != quote || s[last] != quote { + continue + } + for i := 1; i < last-1; i++ { + switch c := s[i]; { + case c == '\\', c == quote && s[i+1] == quote: + i++ + // Accept only escaped quotes and reject otherwise. + case c == quote: + continue Top + } + } + return true + } + return false +} + +// IsLiteralBool reports if the given string is a valid literal bool. +func IsLiteralBool(s string) bool { + _, err := strconv.ParseBool(s) + return err == nil +} + +// IsLiteralNumber reports if the given string is a literal number. +func IsLiteralNumber(s string) bool { + // Hex digits. + if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { + // Some databases allow odd length hex string. + _, err := strconv.ParseUint(s[2:], 16, 64) + return err == nil + } + // Digits with optional exponent. + _, err := strconv.ParseFloat(s, 64) + return err == nil +} + +// DefaultValue returns the string represents the DEFAULT of a column. +func DefaultValue(c *schema.Column) (string, bool) { + switch x := c.Default.(type) { + case nil: + return "", false + case *schema.Literal: + return x.V, true + case *schema.RawExpr: + return x.X, true + default: + panic(fmt.Sprintf("unexpected default value type: %T", x)) + } +} + +// MayWrap ensures the given string is wrapped with parentheses. +// Used by the different drivers to turn strings valid expressions. +func MayWrap(s string) string { + n := len(s) - 1 + if len(s) < 2 || s[0] != '(' || s[n] != ')' || !balanced(s[1:n]) { + return "(" + s + ")" + } + return s +} + +func balanced(expr string) bool { + return ExprLastIndex(expr) == len(expr)-1 +} + +// ExprLastIndex scans the first expression in the given string until +// its end and returns its last index. +func ExprLastIndex(expr string) int { + var l, r int + for i := 0; i < len(expr); i++ { + Top: + switch expr[i] { + case '(': + l++ + case ')': + r++ + // String or identifier. + case '\'', '"', '`': + for j := i + 1; j < len(expr); j++ { + switch expr[j] { + case '\\': + j++ + case expr[i]: + i = j + break Top + } + } + // Unexpected EOS. 
+ return -1
+ }
+ // Balanced parens and we reached EOS or a terminator.
+ if l == r && (i == len(expr)-1 || expr[i+1] == ',') {
+ return i
+ } else if r > l {
+ return -1
+ }
+ }
+ return -1
+}
+
+// ReverseChanges reverses the order of the changes.
+func ReverseChanges(c []schema.Change) {
+ for i, n := 0, len(c); i < n/2; i++ {
+ c[i], c[n-i-1] = c[n-i-1], c[i]
+ }
+}
+
+// P returns a pointer to v.
+func P[T any](v T) *T {
+ return &v
+}
+
+// V returns the value p is pointing to.
+// If p is nil, the zero value is returned.
+func V[T any](p *T) (v T) {
+ if p != nil {
+ v = *p
+ }
+ return
+}
diff --git a/vendor/ariga.io/atlas/sql/migrate/BUILD b/vendor/ariga.io/atlas/sql/migrate/BUILD
new file mode 100644
index 00000000..a56bb396
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/migrate/BUILD
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "migrate",
+ srcs = [
+ "dir.go",
+ "lex.go",
+ "migrate.go",
+ ],
+ importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/migrate",
+ importpath = "ariga.io/atlas/sql/migrate",
+ visibility = ["//visibility:public"],
+ deps = ["//vendor/ariga.io/atlas/sql/schema"],
+)
diff --git a/vendor/ariga.io/atlas/sql/migrate/dir.go b/vendor/ariga.io/atlas/sql/migrate/dir.go
new file mode 100644
index 00000000..da994851
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/migrate/dir.go
@@ -0,0 +1,630 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package migrate
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+)
+
+type (
+ // Dir wraps the functionality used to interact with a migration directory.
+ Dir interface {
+ fs.FS
+ // WriteFile writes the data to the named file.
+ WriteFile(string, []byte) error
+
+ // Files returns a set of files stored in this Dir to be executed on a database.
+ Files() ([]File, error)
+
+ // Checksum returns a HashFile of the migration directory.
+ Checksum() (HashFile, error)
+ }
+
+ // Formatter wraps the Format method.
+ Formatter interface {
+ // Format formats the given Plan into one or more migration files.
+ Format(*Plan) ([]File, error)
+ }
+
+ // File represents a single migration file.
+ File interface {
+ // Name returns the name of the migration file.
+ Name() string
+ // Desc returns the description of the migration File.
+ Desc() string
+ // Version returns the version of the migration File.
+ Version() string
+ // Bytes returns the read content of the file.
+ Bytes() []byte
+ // Stmts returns the set of SQL statements this file holds.
+ Stmts() ([]string, error)
+ // StmtDecls returns the set of SQL statements this file holds alongside its preceding comments.
+ StmtDecls() ([]*Stmt, error)
+ }
+)
+
+// LocalDir implements Dir for a local migration
+// directory with default Atlas formatting.
+type LocalDir struct {
+ path string
+}
+
+var _ Dir = (*LocalDir)(nil)
+
+// NewLocalDir returns a new Dir used by a Planner to work on the given local path.
+func NewLocalDir(path string) (*LocalDir, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, fmt.Errorf("sql/migrate: %w", err)
+ }
+ if !fi.IsDir() {
+ return nil, fmt.Errorf("sql/migrate: %q is not a dir", path)
+ }
+ return &LocalDir{path: path}, nil
+}
+
+// Path returns the local path used for opening this dir.
+func (d *LocalDir) Path() string {
+ return d.path
+}
+
+// Open implements fs.FS.
+func (d *LocalDir) Open(name string) (fs.File, error) {
+ return os.Open(filepath.Join(d.path, name))
+}
+
+// WriteFile implements Dir.WriteFile.
+func (d *LocalDir) WriteFile(name string, b []byte) error {
+ return os.WriteFile(filepath.Join(d.path, name), b, 0644)
+}
+
+// Files implements Dir.Files. It looks for all files with .sql suffix and orders them by filename.
+func (d *LocalDir) Files() ([]File, error) {
+ names, err := fs.Glob(d, "*.sql")
+ if err != nil {
+ return nil, err
+ }
+ // Sort files lexicographically.
+ sort.Slice(names, func(i, j int) bool {
+ return names[i] < names[j]
+ })
+ ret := make([]File, len(names))
+ for i, n := range names {
+ b, err := fs.ReadFile(d, n)
+ if err != nil {
+ return nil, fmt.Errorf("sql/migrate: read file %q: %w", n, err)
+ }
+ ret[i] = NewLocalFile(n, b)
+ }
+ return ret, nil
+}
+
+// Checksum implements Dir.Checksum. By default, it calls Files() and creates a checksum from them.
+func (d *LocalDir) Checksum() (HashFile, error) {
+ files, err := d.Files()
+ if err != nil {
+ return nil, err
+ }
+ return NewHashFile(files)
+}
+
+// LocalFile is the File implementation used by LocalDir.
+type LocalFile struct {
+ n string
+ b []byte
+}
+
+var _ File = (*LocalFile)(nil)
+
+// NewLocalFile returns a new local file.
+func NewLocalFile(name string, data []byte) *LocalFile {
+ return &LocalFile{n: name, b: data}
+}
+
+// Name implements File.Name.
+func (f LocalFile) Name() string {
+ return f.n
+}
+
+// Desc implements File.Desc.
+func (f LocalFile) Desc() string {
+ parts := strings.SplitN(f.n, "_", 2)
+ if len(parts) == 1 {
+ return ""
+ }
+ return strings.TrimSuffix(parts[1], ".sql")
+}
+
+// Version implements File.Version.
+func (f LocalFile) Version() string {
+ return strings.SplitN(strings.TrimSuffix(f.n, ".sql"), "_", 2)[0]
+}
+
+// Stmts returns the SQL statements that exist in the local file.
+func (f LocalFile) Stmts() ([]string, error) {
+ s, err := Stmts(string(f.b))
+ if err != nil {
+ return nil, err
+ }
+ stmts := make([]string, len(s))
+ for i := range s {
+ stmts[i] = s[i].Text
+ }
+ return stmts, nil
+}
+
+// StmtDecls returns all statement declarations that exist in the local file.
+func (f LocalFile) StmtDecls() ([]*Stmt, error) {
+ return Stmts(string(f.b))
+}
+
+// Bytes returns local file data.
+func (f LocalFile) Bytes() []byte {
+ return f.b
+}
+
+// Directive returns the (global) file directives that match the provided name.
+// File directives are located at the top of the file and should not be associated with any
+// statement. Hence, double new lines are used to separate file directives from the file content.
+func (f LocalFile) Directive(name string) (ds []string) {
+ var (
+ comments []string
+ content = string(f.b)
+ )
+ for strings.HasPrefix(content, "#") || strings.HasPrefix(content, "--") {
+ idx := strings.IndexByte(content, '\n')
+ if idx == -1 {
+ // Comments-only file.
+ comments = append(comments, content)
+ break
+ }
+ comments = append(comments, strings.TrimSpace(content[:idx]))
+ content = content[idx+1:]
+ }
+ // File directives are separated by
+ // double newlines from file content.
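+ // That is, the remaining content must start with a blank line
+ // for the comments above to be treated as file directives.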
+ if !strings.HasPrefix(content, "\n") { + return nil + } + for _, c := range comments { + if d, ok := directive(c, name); ok { + ds = append(ds, d) + } + } + return ds +} + +type ( + // MemDir provides an in-memory Dir implementation. + MemDir struct { + files map[string]File + } + // An opened MemDir. + openedMem struct { + dir *MemDir + numUse int + } +) + +// A list of the opened memory-based directories. +var memDirs struct { + sync.Mutex + opened map[string]*openedMem +} + +// OpenMemDir opens an in-memory directory and registers it in the process namespace +// with the given name. Hence, calling OpenMemDir with the same name will return the +// same directory. The directory is deleted when the last reference of it is closed. +func OpenMemDir(name string) *MemDir { + memDirs.Lock() + defer memDirs.Unlock() + if m, ok := memDirs.opened[name]; ok { + m.numUse++ + return m.dir + } + if memDirs.opened == nil { + memDirs.opened = make(map[string]*openedMem) + } + memDirs.opened[name] = &openedMem{dir: &MemDir{}, numUse: 1} + return memDirs.opened[name].dir +} + +// Open implements fs.FS. +func (d *MemDir) Open(name string) (fs.File, error) { + f, ok := d.files[name] + if !ok { + return nil, fs.ErrNotExist + } + return &memFile{ + ReadCloser: io.NopCloser(bytes.NewReader(f.Bytes())), + }, nil +} + +// Close implements the io.Closer interface. +func (d *MemDir) Close() error { + memDirs.Lock() + defer memDirs.Unlock() + var opened string + for name, m := range memDirs.opened { + switch { + case m.dir != d: + case opened != "": + return fmt.Errorf("dir was opened with different names: %q and %q", opened, name) + default: + opened = name + if m.numUse--; m.numUse == 0 { + delete(memDirs.opened, name) + } + } + } + return nil +} + +// WriteFile adds a new file in-memory. +func (d *MemDir) WriteFile(name string, data []byte) error { + if d.files == nil { + d.files = make(map[string]File) + } + d.files[name] = NewLocalFile(name, data) + return nil +} + +// Files returns a set of files stored in-memory to be executed on a database. +func (d *MemDir) Files() ([]File, error) { + files := make([]File, 0, len(d.files)) + for _, f := range d.files { + if filepath.Ext(f.Name()) == ".sql" { + files = append(files, f) + } + } + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() + }) + return files, nil +} + +// Checksum implements Dir.Checksum. +func (d *MemDir) Checksum() (HashFile, error) { + files, err := d.Files() + if err != nil { + return nil, err + } + return NewHashFile(files) +} + +var ( + // templateFunc contains the template.FuncMap for the DefaultFormatter. + templateFuncs = template.FuncMap{ + "upper": strings.ToUpper, + "now": func() string { return time.Now().UTC().Format("20060102150405") }, + } + // DefaultFormatter is a default implementation for Formatter. + DefaultFormatter = TemplateFormatter{ + { + N: template.Must(template.New("").Funcs(templateFuncs).Parse( + "{{ with .Version }}{{ . }}{{ else }}{{ now }}{{ end }}{{ with .Name }}_{{ . }}{{ end }}.sql", + )), + C: template.Must(template.New("").Funcs(templateFuncs).Parse( + `{{ range .Changes }}{{ with .Comment }}{{ printf "-- %s%s\n" (slice . 0 1 | upper ) (slice . 1) }}{{ end }}{{ printf "%s;\n" .Cmd }}{{ end }}`, + )), + }, + } +) + +// TemplateFormatter implements Formatter by using templates. +type TemplateFormatter []struct{ N, C *template.Template } + +// NewTemplateFormatter creates a new Formatter working with the given templates. 
+// +// migrate.NewTemplateFormatter( +// template.Must(template.New("").Parse("{{now.Unix}}{{.Name}}.sql")), // name template +// template.Must(template.New("").Parse("{{range .Changes}}{{println .Cmd}}{{end}}")), // content template +// ) +func NewTemplateFormatter(templates ...*template.Template) (TemplateFormatter, error) { + if n := len(templates); n == 0 || n%2 == 1 { + return nil, fmt.Errorf("zero or odd number of templates given: %d", n) + } + t := make(TemplateFormatter, 0, len(templates)) + for i := 0; i < len(templates); i += 2 { + t = append(t, struct{ N, C *template.Template }{templates[i], templates[i+1]}) + } + return t, nil +} + +// Format implements the Formatter interface. +func (t TemplateFormatter) Format(plan *Plan) ([]File, error) { + files := make([]File, 0, len(t)) + for _, tpl := range t { + var n, b bytes.Buffer + if err := tpl.N.Execute(&n, plan); err != nil { + return nil, err + } + if err := tpl.C.Execute(&b, plan); err != nil { + return nil, err + } + files = append(files, &LocalFile{ + n: n.String(), + b: b.Bytes(), + }) + } + return files, nil +} + +// HashFileName of the migration directory integrity sum file. +const HashFileName = "atlas.sum" + +// HashFile represents the integrity sum file of the migration dir. +type HashFile []struct{ N, H string } + +// NewHashFile computes and returns a HashFile from the given directory's files. +func NewHashFile(files []File) (HashFile, error) { + var ( + hs HashFile + h = sha256.New() + ) + for _, f := range files { + if _, err := h.Write([]byte(f.Name())); err != nil { + return nil, err + } + // Check if this file contains an "atlas:sum" directive and if so, act to it. + if mode, ok := directive(string(f.Bytes()), directiveSum); ok && mode == sumModeIgnore { + continue + } + if _, err := h.Write(f.Bytes()); err != nil { + return nil, err + } + hs = append(hs, struct{ N, H string }{f.Name(), base64.StdEncoding.EncodeToString(h.Sum(nil))}) + } + return hs, nil +} + +// WriteSumFile writes the given HashFile to the Dir. If the file does not exist, it is created. +func WriteSumFile(dir Dir, sum HashFile) error { + b, err := sum.MarshalText() + if err != nil { + return err + } + return dir.WriteFile(HashFileName, b) +} + +// Sum returns the checksum of the represented hash file. +func (f HashFile) Sum() string { + sha := sha256.New() + for _, f := range f { + sha.Write([]byte(f.N)) + sha.Write([]byte(f.H)) + } + return base64.StdEncoding.EncodeToString(sha.Sum(nil)) +} + +// MarshalText implements encoding.TextMarshaler. +func (f HashFile) MarshalText() ([]byte, error) { + buf := new(bytes.Buffer) + for _, f := range f { + fmt.Fprintf(buf, "%s h1:%s\n", f.N, f.H) + } + return []byte(fmt.Sprintf("h1:%s\n%s", f.Sum(), buf)), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (f *HashFile) UnmarshalText(b []byte) error { + sc := bufio.NewScanner(bytes.NewReader(b)) + // The first line contains the sum. + sc.Scan() + sum := strings.TrimPrefix(sc.Text(), "h1:") + for sc.Scan() { + li := strings.SplitN(sc.Text(), "h1:", 2) + if len(li) != 2 { + return ErrChecksumFormat + } + *f = append(*f, struct{ N, H string }{strings.TrimSpace(li[0]), li[1]}) + } + if sum != f.Sum() { + return ErrChecksumMismatch + } + return sc.Err() +} + +// SumByName returns the hash for a migration file by its name. 
+func (f HashFile) SumByName(n string) (string, error) { + for _, f := range f { + if f.N == n { + return f.H, nil + } + } + return "", errors.New("checksum not found") +} + +var ( + // ErrChecksumFormat is returned from Validate if the sum files format is invalid. + ErrChecksumFormat = errors.New("checksum file format invalid") + // ErrChecksumMismatch is returned from Validate if the hash sums don't match. + ErrChecksumMismatch = errors.New("checksum mismatch") + // ErrChecksumNotFound is returned from Validate if the hash file does not exist. + ErrChecksumNotFound = errors.New("checksum file not found") +) + +// Validate checks if the migration dir is in sync with its sum file. +// If they don't match ErrChecksumMismatch is returned. +func Validate(dir Dir) error { + // If a migration directory implements the Validate() method, + // it will be used to determine the validity instead. + if v, ok := dir.(interface{ Validate() error }); ok { + return v.Validate() + } + fh, err := readHashFile(dir) + if errors.Is(err, fs.ErrNotExist) { + // If there are no migration files yet this is okay. + if files, err := dir.Files(); err != nil { + return err + } else if len(files) > 0 { + return ErrChecksumNotFound + } + return nil + } + if err != nil { + return err + } + mh, err := dir.Checksum() + if err != nil { + return err + } + if fh.Sum() != mh.Sum() { + return ErrChecksumMismatch + } + return nil +} + +// FilesLastIndex returns the index of the last file +// satisfying f(i), or -1 if none do. +func FilesLastIndex(files []File, f func(File) bool) int { + for i := len(files) - 1; i >= 0; i-- { + if f(files[i]) { + return i + } + } + return -1 +} + +const ( + // atlas:sum directive. + directiveSum = "sum" + sumModeIgnore = "ignore" + // atlas:delimiter directive. + directiveDelimiter = "delimiter" + directivePrefixSQL = "-- " +) + +var reDirective = regexp.MustCompile(`^([ -~]*)atlas:(\w+)(?: +([ -~]*))*`) + +// directive searches in the content a line that matches a directive +// with the given prefix and name. For example: +// +// directive(c, "delimiter", "-- ") // '-- atlas:delimiter.*' +// directive(c, "sum", "") // 'atlas:sum.*' +// directive(c, "sum") // '.*atlas:sum' +func directive(content, name string, prefix ...string) (string, bool) { + m := reDirective.FindStringSubmatch(content) + // In case the prefix was provided ensures it is matched. + if len(m) == 4 && m[2] == name && (len(prefix) == 0 || prefix[0] == m[1]) { + return m[3], true + } + return "", false +} + +// readHashFile reads the HashFile from the given Dir. +func readHashFile(dir Dir) (HashFile, error) { + f, err := dir.Open(HashFileName) + if err != nil { + return nil, err + } + defer f.Close() + b, err := io.ReadAll(f) + if err != nil { + return nil, err + } + var fh HashFile + if err := fh.UnmarshalText(b); err != nil { + return nil, err + } + return fh, nil +} + +// memFile implements the File interface for a file in memory. +type memFile struct{ io.ReadCloser } + +// Stat returns a zero FileInfo. +func (m *memFile) Stat() (fs.FileInfo, error) { return m, nil } +func (m *memFile) Name() string { return "" } +func (m *memFile) Size() int64 { return 0 } +func (m *memFile) Mode() fs.FileMode { return 0 } +func (m *memFile) ModTime() time.Time { return time.Time{} } +func (m *memFile) IsDir() bool { return false } +func (m *memFile) Sys() interface{} { return nil } + +// ArchiveDir returns a tar archive of the given directory. 
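+//
+// A hedged round-trip sketch (editor's illustration):
+//
+//	arc, err := ArchiveDir(dir) // atlas.sum is written first when present
+//	if err != nil {
+//		return err
+//	}
+//	dir2, err := UnarchiveDir(arc) // restore into an in-memory Dir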
+func ArchiveDir(dir Dir) ([]byte, error) { + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + defer tw.Close() + sumF, err := dir.Open(HashFileName) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + if sumF != nil { + sumB, err := io.ReadAll(sumF) + if err != nil { + return nil, err + } + if err := append2Tar(tw, HashFileName, sumB); err != nil { + return nil, err + } + } + files, err := dir.Files() + if err != nil { + return nil, err + } + for _, f := range files { + if err := append2Tar(tw, f.Name(), f.Bytes()); err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +// UnarchiveDir extracts the tar archive into the given directory. +func UnarchiveDir(arc []byte) (Dir, error) { + var ( + md = &MemDir{} + tr = tar.NewReader(bytes.NewReader(arc)) + ) + for { + h, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + data, err := io.ReadAll(tr) + if err != nil { + return nil, err + } + if err := md.WriteFile(h.Name, data); err != nil { + return nil, err + } + } + return md, nil +} + +func append2Tar(tw *tar.Writer, name string, data []byte) error { + if err := tw.WriteHeader(&tar.Header{ + Name: name, + Mode: 0600, + Size: int64(len(data)), + }); err != nil { + return err + } + if _, err := tw.Write(data); err != nil { + return err + } + return nil +} diff --git a/vendor/ariga.io/atlas/sql/migrate/lex.go b/vendor/ariga.io/atlas/sql/migrate/lex.go new file mode 100644 index 00000000..b43674e4 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/migrate/lex.go @@ -0,0 +1,302 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package migrate + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Stmt represents a scanned statement text along with its +// position in the file and associated comments group. +type Stmt struct { + Pos int // statement position + Text string // statement text + Comments []string // associated comments +} + +// Directive returns all directive comments with the given name. +// See: pkg.go.dev/cmd/compile#hdr-Compiler_Directives. +func (s *Stmt) Directive(name string) (ds []string) { + for _, c := range s.Comments { + switch { + case strings.HasPrefix(c, "/*") && !strings.Contains(c, "\n"): + if d, ok := directive(strings.TrimSuffix(c, "*/"), name, "/*"); ok { + ds = append(ds, d) + } + default: + for _, p := range []string{"#", "--", "-- "} { + if d, ok := directive(c, name, p); ok { + ds = append(ds, d) + } + } + } + } + return +} + +// Stmts provides a generic implementation for extracting SQL statements from the given file contents. 
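+//
+// For example, with the default ";" delimiter (sketch):
+//
+//	stmts, err := migrate.Stmts("CREATE TABLE t (c int);\nDROP TABLE t;")
+//	// stmts[0].Text == "CREATE TABLE t (c int);"
+//	// stmts[1].Text == "DROP TABLE t;"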
+func Stmts(input string) ([]*Stmt, error) { + var stmts []*Stmt + l, err := newLex(input) + if err != nil { + return nil, err + } + for { + s, err := l.stmt() + if err == io.EOF { + return stmts, nil + } + if err != nil { + return nil, err + } + stmts = append(stmts, s) + } +} + +type lex struct { + input string + pos int // current phase position + total int // total bytes scanned so far + width int // size of latest rune + delim string // configured delimiter + comments []string // collected comments +} + +const ( + eos = -1 + delimiter = ";" + delimiterCmd = "delimiter" +) + +func newLex(input string) (*lex, error) { + l := &lex{input: input, delim: delimiter} + if d, ok := directive(input, directiveDelimiter, directivePrefixSQL); ok { + if err := l.setDelim(d); err != nil { + return nil, err + } + parts := strings.SplitN(input, "\n", 2) + if len(parts) == 1 { + return nil, l.error(l.pos, "no input found after delimiter %q", d) + } + l.input = parts[1] + } + return l, nil +} + +// Dollar-quoted string as defined by the PostgreSQL scanner. +var reDollarQuote = regexp.MustCompile(`^\$([A-Za-zÈ-ÿ_][\wÈ-ÿ]*)*\$`) + +func (l *lex) stmt() (*Stmt, error) { + var ( + depth, openingPos int + text string + ) + l.skipSpaces() +Scan: + for { + switch r := l.next(); { + case r == eos: + switch { + case depth > 0: + return nil, l.error(openingPos, "unclosed '('") + case l.pos > 0: + text = l.input + break Scan + default: + return nil, io.EOF + } + case r == '(': + if depth == 0 { + openingPos = l.pos + } + depth++ + case r == ')': + if depth == 0 { + return nil, l.error(l.pos, "unexpected ')'") + } + depth-- + case r == '\'', r == '"', r == '`': + if err := l.skipQuote(r); err != nil { + return nil, err + } + // Check if the start of the statement is the MySQL DELIMITER command. + // See https://dev.mysql.com/doc/refman/8.0/en/mysql-commands.html. + case l.pos == 1 && len(l.input) > len(delimiterCmd) && strings.EqualFold(l.input[:len(delimiterCmd)], delimiterCmd): + l.addPos(len(delimiterCmd) - 1) + if err := l.delimCmd(); err != nil { + return nil, err + } + // Delimiters take precedence over comments. + case depth == 0 && strings.HasPrefix(l.input[l.pos-l.width:], l.delim): + l.addPos(len(l.delim) - l.width) + text = l.input[:l.pos] + break Scan + case r == '$' && reDollarQuote.MatchString(l.input[l.pos-1:]): + if err := l.skipDollarQuote(); err != nil { + return nil, err + } + case r == '#': + l.comment("#", "\n") + case r == '-' && l.next() == '-': + l.comment("--", "\n") + case r == '/' && l.next() == '*': + l.comment("/*", "*/") + } + } + return l.emit(text), nil +} + +func (l *lex) next() rune { + if l.pos >= len(l.input) { + return eos + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.addPos(w) + return r +} + +func (l *lex) pick() rune { + p, w := l.pos, l.width + r := l.next() + l.pos, l.width = p, w + return r +} + +func (l *lex) addPos(p int) { + l.pos += p + l.total += p +} + +func (l *lex) skipQuote(quote rune) error { + pos := l.pos + for { + switch r := l.next(); { + case r == eos: + return l.error(pos, "unclosed quote %q", quote) + case r == '\\': + l.next() + case r == quote: + return nil + } + } +} + +func (l *lex) skipDollarQuote() error { + m := reDollarQuote.FindString(l.input[l.pos-1:]) + if m == "" { + return l.error(l.pos, "unexpected dollar quote") + } + l.addPos(len(m) - 1) + for { + switch r := l.next(); { + case r == eos: + // Fail only if a delimiter was not set. 
+			if l.delim == "" {
+				return l.error(l.pos, "unclosed dollar-quoted string")
+			}
+			return nil
+		case r == '$' && strings.HasPrefix(l.input[l.pos-1:], m):
+			l.addPos(len(m) - 1)
+			return nil
+		}
+	}
+}
+
+func (l *lex) comment(left, right string) {
+	i := strings.Index(l.input[l.pos:], right)
+	// Not a comment.
+	if i == -1 {
+		return
+	}
+	// If the comment resides inside a statement, keep it as part of the statement text.
+	if l.pos != len(left) {
+		l.addPos(i + len(right))
+		return
+	}
+	l.addPos(i + len(right))
+	// If we did not scan any statement characters, it
+	// can be skipped and stored in the comments group.
+	l.comments = append(l.comments, l.input[:l.pos])
+	l.input = l.input[l.pos:]
+	l.pos = 0
+	// A double \n separates the comments group from the statement.
+	if strings.HasPrefix(l.input, "\n\n") || right == "\n" && strings.HasPrefix(l.input, "\n") {
+		l.comments = nil
+	}
+	l.skipSpaces()
+}
+
+func (l *lex) skipSpaces() {
+	n := len(l.input)
+	l.input = strings.TrimLeftFunc(l.input, unicode.IsSpace)
+	l.total += n - len(l.input)
+}
+
+func (l *lex) emit(text string) *Stmt {
+	s := &Stmt{Pos: l.total - len(text), Text: text, Comments: l.comments}
+	l.input = l.input[l.pos:]
+	l.pos = 0
+	l.comments = nil
+	// Trim custom delimiter.
+	if l.delim != delimiter {
+		s.Text = strings.TrimSuffix(s.Text, l.delim)
+	}
+	s.Text = strings.TrimSpace(s.Text)
+	return s
+}
+
+// delimCmd checks if the scanned "DELIMITER"
+// text represents an actual delimiter command.
+func (l *lex) delimCmd() error {
+	// A space must come after the DELIMITER command.
+	if l.pick() != ' ' {
+		return nil
+	}
+	// Scan delimiter.
+	for r := l.pick(); r != eos && r != '\n'; r = l.next() {
+	}
+	delim := strings.TrimSpace(l.input[len(delimiterCmd):l.pos])
+	// MySQL client allows quoting delimiters.
+	if strings.HasPrefix(delim, "'") && strings.HasSuffix(delim, "'") {
+		delim = strings.ReplaceAll(delim[1:len(delim)-1], "''", "'")
+	}
+	if err := l.setDelim(delim); err != nil {
+		return err
+	}
+	// Skip everything we have seen until now.
+	l.emit(l.input[:l.pos])
+	return nil
+}
+
+func (l *lex) setDelim(d string) error {
+	if d == "" {
+		return errors.New("empty delimiter")
+	}
+	// Unescape delimiters. e.g. "\\n" => "\n".
+	l.delim = strings.NewReplacer(`\n`, "\n", `\r`, "\r", `\t`, "\t").Replace(d)
+	return nil
+}
+
+func (l *lex) error(pos int, format string, args ...any) error {
+	format = "%d:%d: " + format
+	var (
+		s    = l.input[:pos]
+		col  = strings.LastIndex(s, "\n")
+		line = 1 + strings.Count(s, "\n")
+	)
+	if line == 1 {
+		col = pos
+	} else {
+		col = pos - col - 1
+	}
+	return fmt.Errorf(format, append([]any{line, col}, args...)...)
+}
diff --git a/vendor/ariga.io/atlas/sql/migrate/migrate.go b/vendor/ariga.io/atlas/sql/migrate/migrate.go
new file mode 100644
index 00000000..f251dccb
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/migrate/migrate.go
@@ -0,0 +1,975 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package migrate
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"ariga.io/atlas/sql/schema"
+)
+
+type (
+	// A Plan defines a planned changeset whose execution brings the database to
+	// the new desired state. Additional information is calculated by the different
+	// drivers to indicate if the changeset is transactional (can be rolled back) and
+	// reversible (a down file can be generated for it).
+	Plan struct {
+		// Version and Name of the plan. Provided by the user or auto-generated.
+		Version, Name string
+
+		// Reversible describes if the changeset is reversible.
+		Reversible bool
+
+		// Transactional describes if the changeset is transactional.
+		Transactional bool
+
+		// Changes defines the list of changes in the plan.
+		Changes []*Change
+	}
+
+	// A Change of a migration.
+	Change struct {
+		// Cmd or statement to execute.
+		Cmd string
+
+		// Args for placeholder parameters in the statement above.
+		Args []any
+
+		// A Comment describes the change.
+		Comment string
+
+		// Reverse contains the "reversed" statement(s) if
+		// the command is reversible.
+		Reverse any // string | []string
+
+		// The Source that caused this change, or nil.
+		Source schema.Change
+	}
+)
+
+// ReverseStmts returns the reverse statements of a Change, if any.
+func (c *Change) ReverseStmts() (cmd []string, err error) {
+	switch r := c.Reverse.(type) {
+	case nil:
+	case string:
+		cmd = []string{r}
+	case []string:
+		cmd = r
+	default:
+		err = fmt.Errorf("sql/migrate: unexpected type %T for reverse commands", r)
+	}
+	return
+}
+
+type (
+	// The Driver interface must be implemented by the different dialects to support database
+	// migration authoring/planning and applying. ExecQuerier, Inspector and Differ provide
+	// basic schema primitives for inspecting database schemas, calculating the difference between
+	// schema elements, and executing raw SQL statements. The PlanApplier interface wraps the
+	// methods for generating a migration plan and applying the actual changes on the database.
+	Driver interface {
+		schema.Differ
+		schema.ExecQuerier
+		schema.Inspector
+		PlanApplier
+	}
+
+	// PlanApplier wraps the methods for planning and applying changes
+	// on the database.
+	PlanApplier interface {
+		// PlanChanges returns a migration plan for applying the given changeset.
+		PlanChanges(context.Context, string, []schema.Change, ...PlanOption) (*Plan, error)
+
+		// ApplyChanges is responsible for applying the given changeset.
+		// An error may be returned from ApplyChanges if the driver is
+		// unable to execute a change.
+		ApplyChanges(context.Context, []schema.Change, ...PlanOption) error
+	}
+
+	// PlanOptions holds the migration plan options to be used by PlanApplier.
+	PlanOptions struct {
+		// PlanWithSchemaQualifier allows setting a custom schema to prefix
+		// tables and other resources. An empty string indicates no qualifier.
+		SchemaQualifier *string
+		// Indent is the string to use for indentation.
+		// If empty, no indentation is used.
+		Indent string
+	}
+
+	// PlanOption allows configuring a driver's plan using functional arguments.
+	PlanOption func(*PlanOptions)
+
+	// StateReader wraps the method for reading a database/schema state.
+	// The types below provide a few builtin options for reading a state
+	// from a migration directory or a static object (e.g. a parsed file).
+	StateReader interface {
+		ReadState(ctx context.Context) (*schema.Realm, error)
+	}
+
+	// The StateReaderFunc type is an adapter to allow the use of
+	// ordinary functions as state readers.
+	StateReaderFunc func(ctx context.Context) (*schema.Realm, error)
+)
+
+// ReadState calls f(ctx).
+func (f StateReaderFunc) ReadState(ctx context.Context) (*schema.Realm, error) {
+	return f(ctx)
+}
+
+// ErrNoPlan is returned by Plan when there is no change between the two states.
+var ErrNoPlan = errors.New("sql/migrate: no plan for matched states")
+
+// Realm returns a StateReader for the static Realm object.
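+//
+// It can be used wherever a StateReader is expected, e.g. as the desired
+// state of a plan (sketch; p is a *Planner and r an inspected realm):
+//
+//	plan, err := p.Plan(ctx, "add_users", migrate.Realm(r))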
+func Realm(r *schema.Realm) StateReader {
+	return StateReaderFunc(func(context.Context) (*schema.Realm, error) {
+		return r, nil
+	})
+}
+
+// Schema returns a StateReader for the static Schema object.
+func Schema(s *schema.Schema) StateReader {
+	return StateReaderFunc(func(context.Context) (*schema.Realm, error) {
+		r := &schema.Realm{Schemas: []*schema.Schema{s}}
+		if s.Realm != nil {
+			r.Attrs = s.Realm.Attrs
+		}
+		s.Realm = r
+		return r, nil
+	})
+}
+
+// RealmConn returns a StateReader for a Driver connected to a database.
+func RealmConn(drv Driver, opts *schema.InspectRealmOption) StateReader {
+	return StateReaderFunc(func(ctx context.Context) (*schema.Realm, error) {
+		return drv.InspectRealm(ctx, opts)
+	})
+}
+
+// SchemaConn returns a StateReader for a Driver connected to a schema.
+func SchemaConn(drv Driver, name string, opts *schema.InspectOptions) StateReader {
+	return StateReaderFunc(func(ctx context.Context) (*schema.Realm, error) {
+		s, err := drv.InspectSchema(ctx, name, opts)
+		if err != nil {
+			return nil, err
+		}
+		return Schema(s).ReadState(ctx)
+	})
+}
+
+type (
+	// Planner can plan the steps to take to migrate from one state to another. It uses the enclosed Dir to write
+	// those changes to versioned migration files.
+	Planner struct {
+		drv  Driver       // driver to use
+		dir  Dir          // where migration files are stored and read from
+		fmt  Formatter    // how to format a plan to migration files
+		sum  bool         // whether to create a sum file for the migration directory
+		opts []PlanOption // driver options
+	}
+
+	// PlannerOption allows managing a Planner using functional arguments.
+	PlannerOption func(*Planner)
+
+	// A RevisionReadWriter wraps the functionality for reading and writing migration revisions in a database table.
+	RevisionReadWriter interface {
+		// Ident returns an object that identifies this history table.
+		Ident() *TableIdent
+		// ReadRevisions returns all revisions.
+		ReadRevisions(context.Context) ([]*Revision, error)
+		// ReadRevision returns a revision by version.
+		// Returns ErrRevisionNotExist if the version does not exist.
+		ReadRevision(context.Context, string) (*Revision, error)
+		// WriteRevision saves the revision to the storage.
+		WriteRevision(context.Context, *Revision) error
+		// DeleteRevision deletes a revision by version from the storage.
+		DeleteRevision(context.Context, string) error
+	}
+
+	// A Revision denotes an applied migration in a deployment. Used to track the migration execution state of a database.
+	Revision struct {
+		Version         string        `json:"Version"`             // Version of the migration.
+		Description     string        `json:"Description"`         // Description of this migration.
+		Type            RevisionType  `json:"Type"`                // Type of the migration.
+		Applied         int           `json:"Applied"`             // Applied is the number of applied statements in the migration.
+		Total           int           `json:"Total"`               // Total is the number of statements in the migration.
+		ExecutedAt      time.Time     `json:"ExecutedAt"`          // ExecutedAt is the starting point of execution.
+		ExecutionTime   time.Duration `json:"ExecutionTime"`       // ExecutionTime of the migration.
+		Error           string        `json:"Error,omitempty"`     // Error of the migration, if any occurred.
+		ErrorStmt       string        `json:"ErrorStmt,omitempty"` // ErrorStmt is the statement that raised Error.
+		Hash            string        `json:"-"`                   // Hash of migration file.
+		PartialHashes   []string      `json:"-"`                   // PartialHashes are the hashes of applied statements.
+		OperatorVersion string        `json:"OperatorVersion"`     // OperatorVersion that executed this migration.
+	}
+
+	// RevisionType defines the type of the revision record in the history table.
+	RevisionType uint
+
+	// Executor is responsible for managing and executing a set of migration files against a database.
+	Executor struct {
+		drv         Driver             // The Driver to access and manage the database.
+		dir         Dir                // The Dir with migration files to use.
+		rrw         RevisionReadWriter // The RevisionReadWriter to read and write database revisions to.
+		log         Logger             // The Logger to use.
+		fromVer     string             // Calculate pending files from the given version (including it).
+		baselineVer string             // Start the first migration after the given baseline version.
+		allowDirty  bool               // Allow starting to work on a non-clean database.
+		operator    string             // Revision.OperatorVersion
+	}
+
+	// ExecutorOption allows configuring an Executor using functional arguments.
+	ExecutorOption func(*Executor) error
+)
+
+const (
+	// RevisionTypeUnknown represents an unknown revision type.
+	// This type is unexpected and exists here only to ensure
+	// the type is not set to the zero value.
+	RevisionTypeUnknown RevisionType = 0
+
+	// RevisionTypeBaseline represents a baseline revision. Note that only
+	// the first record can represent a baseline migration and most of its
+	// fields are set to the zero value.
+	RevisionTypeBaseline RevisionType = 1 << (iota - 1)
+
+	// RevisionTypeExecute represents a migration that was executed.
+	RevisionTypeExecute
+
+	// RevisionTypeResolved represents a migration that was resolved. A migration
+	// script that was executed and then resolved should set its Type to
+	// RevisionTypeExecute | RevisionTypeResolved.
+	RevisionTypeResolved
+)
+
+// Has reports whether the given flag is set.
+func (r RevisionType) Has(f RevisionType) bool {
+	return r&f != 0
+}
+
+// String implements fmt.Stringer.
+func (r RevisionType) String() string {
+	switch {
+	case r == RevisionTypeBaseline:
+		return "baseline"
+	case r == RevisionTypeExecute:
+		return "applied"
+	case r == RevisionTypeResolved:
+		return "manually set"
+	case r == RevisionTypeExecute|RevisionTypeResolved:
+		return "applied + manually set"
+	default:
+		return "unknown"
+	}
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (r RevisionType) MarshalText() ([]byte, error) {
+	s := r.String()
+	if s == "unknown" {
+		return nil, fmt.Errorf("unexpected revision type '%v'", r)
+	}
+	return []byte(s), nil
+}
+
+// NewPlanner creates a new Planner.
+func NewPlanner(drv Driver, dir Dir, opts ...PlannerOption) *Planner {
+	p := &Planner{drv: drv, dir: dir, sum: true}
+	for _, opt := range opts {
+		opt(p)
+	}
+	if p.fmt == nil {
+		p.fmt = DefaultFormatter
+	}
+	return p
+}
+
+// PlanWithSchemaQualifier allows setting a custom schema to prefix tables and
+// other resources. An empty string indicates no prefix.
+//
+// Note, this option requires the changes to be scoped to one
+// schema; an error is returned otherwise.
+func PlanWithSchemaQualifier(q string) PlannerOption {
+	return func(p *Planner) {
+		p.opts = append(p.opts, func(o *PlanOptions) {
+			o.SchemaQualifier = &q
+		})
+	}
+}
+
+// PlanWithIndent allows generating SQL statements with indentation.
+// An empty string indicates no indentation.
+func PlanWithIndent(indent string) PlannerOption {
+	return func(p *Planner) {
+		p.opts = append(p.opts, func(o *PlanOptions) {
+			o.Indent = indent
+		})
+	}
+}
+
+// PlanFormat sets the Formatter of a Planner.
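+//
+// A sketch of wiring a custom formatter into a Planner (nameT and bodyT
+// are caller-defined templates; drv and dir are assumed to exist):
+//
+//	f, err := migrate.NewTemplateFormatter(nameT, bodyT)
+//	if err != nil {
+//		// handle error
+//	}
+//	p := migrate.NewPlanner(drv, dir, migrate.PlanFormat(f))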
+func PlanFormat(fmt Formatter) PlannerOption {
+	return func(p *Planner) {
+		p.fmt = fmt
+	}
+}
+
+// PlanWithChecksum allows setting if the hash-sum functionality
+// for the migration directory is enabled or not.
+func PlanWithChecksum(b bool) PlannerOption {
+	return func(p *Planner) {
+		p.sum = b
+	}
+}
+
+var (
+	// WithFormatter calls PlanFormat.
+	// Deprecated: use PlanFormat instead.
+	WithFormatter = PlanFormat
+	// DisableChecksum calls PlanWithChecksum(false).
+	// Deprecated: use PlanWithChecksum instead.
+	DisableChecksum = func() PlannerOption { return PlanWithChecksum(false) }
+)
+
+// Plan calculates the migration Plan required for moving from the current state (from)
+// to the next state (to). A StateReader can be a directory, static schema elements or a Driver connection.
+func (p *Planner) Plan(ctx context.Context, name string, to StateReader) (*Plan, error) {
+	return p.plan(ctx, name, to, true)
+}
+
+// PlanSchema is like Plan but limits its scope to the schema connection.
+// Note, the operation fails in case the connection was not set to a schema.
+func (p *Planner) PlanSchema(ctx context.Context, name string, to StateReader) (*Plan, error) {
+	return p.plan(ctx, name, to, false)
+}
+
+func (p *Planner) plan(ctx context.Context, name string, to StateReader, realmScope bool) (*Plan, error) {
+	from, err := NewExecutor(p.drv, p.dir, NopRevisionReadWriter{})
+	if err != nil {
+		return nil, err
+	}
+	current, err := from.Replay(ctx, func() StateReader {
+		if realmScope {
+			return RealmConn(p.drv, nil)
+		}
+		// In case the scope is the schema connection,
+		// inspect it and return its connected realm.
+		return SchemaConn(p.drv, "", nil)
+	}())
+	if err != nil {
+		return nil, err
+	}
+	desired, err := to.ReadState(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var changes []schema.Change
+	switch {
+	case realmScope:
+		changes, err = p.drv.RealmDiff(current, desired)
+	default:
+		switch n, m := len(current.Schemas), len(desired.Schemas); {
+		case n == 0:
+			return nil, errors.New("no schema was found in current state after replaying migration directory")
+		case n > 1:
+			return nil, fmt.Errorf("%d schemas were found in current state after replaying migration directory", len(current.Schemas))
+		case m == 0:
+			return nil, errors.New("no schema was found in desired state")
+		case m > 1:
+			return nil, fmt.Errorf("%d schemas were found in desired state; expect 1", len(desired.Schemas))
+		default:
+			s1, s2 := *current.Schemas[0], *desired.Schemas[0]
+			// Avoid comparing schema names when scope is limited to one schema,
+			// and the schema qualifier is controlled by the caller.
+			if s1.Name != s2.Name {
+				s1.Name = s2.Name
+			}
+			changes, err = p.drv.SchemaDiff(&s1, &s2)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	if len(changes) == 0 {
+		return nil, ErrNoPlan
+	}
+	return p.drv.PlanChanges(ctx, name, changes, p.opts...)
+}
+
+// WritePlan writes the given Plan to the Dir based on the configured Formatter.
+func (p *Planner) WritePlan(plan *Plan) error {
+	// Format the plan into files.
+	files, err := p.fmt.Format(plan)
+	if err != nil {
+		return err
+	}
+	// Store the files in the migration directory.
+	for _, f := range files {
+		if err := p.dir.WriteFile(f.Name(), f.Bytes()); err != nil {
+			return err
+		}
+	}
+	// If enabled, update the sum file.
+ if p.sum { + sum, err := p.dir.Checksum() + if err != nil { + return err + } + return WriteSumFile(p.dir, sum) + } + return nil +} + +var ( + // ErrNoPendingFiles is returned if there are no pending migration files to execute on the managed database. + ErrNoPendingFiles = errors.New("sql/migrate: execute: nothing to do") + // ErrSnapshotUnsupported is returned if there is no Snapshoter given. + ErrSnapshotUnsupported = errors.New("sql/migrate: driver does not support taking a database snapshot") + // ErrCleanCheckerUnsupported is returned if there is no CleanChecker given. + ErrCleanCheckerUnsupported = errors.New("sql/migrate: driver does not support checking if database is clean") + // ErrRevisionNotExist is returned if the requested revision is not found in the storage. + ErrRevisionNotExist = errors.New("sql/migrate: revision not found") +) + +// MissingMigrationError is returned if a revision is partially applied but +// the matching migration file is not found in the migration directory. +type MissingMigrationError struct{ Version, Description string } + +// Error implements error. +func (e MissingMigrationError) Error() string { + return fmt.Sprintf( + "sql/migrate: missing migration: revision %q is partially applied but migration file was not found", + fmt.Sprintf("%s_%s.sql", e.Version, e.Description), + ) +} + +// NewExecutor creates a new Executor with default values. +func NewExecutor(drv Driver, dir Dir, rrw RevisionReadWriter, opts ...ExecutorOption) (*Executor, error) { + if drv == nil { + return nil, errors.New("sql/migrate: execute: no driver given") + } + if dir == nil { + return nil, errors.New("sql/migrate: execute: no dir given") + } + if rrw == nil { + return nil, errors.New("sql/migrate: execute: no revision storage given") + } + ex := &Executor{drv: drv, dir: dir, rrw: rrw} + for _, opt := range opts { + if err := opt(ex); err != nil { + return nil, err + } + } + if ex.log == nil { + ex.log = NopLogger{} + } + if _, ok := drv.(Snapshoter); !ok { + return nil, ErrSnapshotUnsupported + } + if _, ok := drv.(CleanChecker); !ok { + return nil, ErrCleanCheckerUnsupported + } + if ex.baselineVer != "" && ex.allowDirty { + return nil, errors.New("sql/migrate: execute: baseline and allow-dirty are mutually exclusive") + } + return ex, nil +} + +// WithAllowDirty defines if we can start working on a non-clean database +// in the first migration execution. +func WithAllowDirty(b bool) ExecutorOption { + return func(ex *Executor) error { + ex.allowDirty = b + return nil + } +} + +// WithBaselineVersion allows setting the baseline version of the database on the +// first migration. Hence, all versions up to and including this version are skipped. +func WithBaselineVersion(v string) ExecutorOption { + return func(ex *Executor) error { + ex.baselineVer = v + return nil + } +} + +// WithLogger sets the Logger of an Executor. +func WithLogger(log Logger) ExecutorOption { + return func(ex *Executor) error { + ex.log = log + return nil + } +} + +// WithFromVersion allows passing a file version as a starting point for calculating +// pending migration scripts. It can be useful for skipping specific files. +func WithFromVersion(v string) ExecutorOption { + return func(ex *Executor) error { + ex.fromVer = v + return nil + } +} + +// WithOperatorVersion sets the operator version to save on the revisions +// when executing migration files. 
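+//
+// A sketch of constructing an Executor with options (drv, dir and rrw
+// are assumed to exist; the operator string is arbitrary):
+//
+//	ex, err := migrate.NewExecutor(drv, dir, rrw,
+//		migrate.WithBaselineVersion("1"),
+//		migrate.WithOperatorVersion("atlas/v0.x"),
+//	)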
+func WithOperatorVersion(v string) ExecutorOption {
+	return func(ex *Executor) error {
+		ex.operator = v
+		return nil
+	}
+}
+
+// Pending returns all pending (not fully applied) migration files in the migration directory.
+func (e *Executor) Pending(ctx context.Context) ([]File, error) {
+	// Don't operate with a broken migration directory.
+	if err := Validate(e.dir); err != nil {
+		return nil, fmt.Errorf("sql/migrate: execute: validate migration directory: %w", err)
+	}
+	// Read all applied database revisions.
+	revs, err := e.rrw.ReadRevisions(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("sql/migrate: execute: read revisions: %w", err)
+	}
+	// Select the correct migration files.
+	migrations, err := e.dir.Files()
+	if err != nil {
+		return nil, fmt.Errorf("sql/migrate: execute: select migration files: %w", err)
+	}
+	if len(migrations) == 0 {
+		return nil, ErrNoPendingFiles
+	}
+	var pending []File
+	switch {
+	// If this is the first time we run.
+	case len(revs) == 0:
+		var cerr *NotCleanError
+		if err = e.drv.(CleanChecker).CheckClean(ctx, e.rrw.Ident()); err != nil && !errors.As(err, &cerr) {
+			return nil, err
+		}
+		// In case the workspace is not clean, one of the flags is required.
+		if cerr != nil && !e.allowDirty && e.baselineVer == "" {
+			return nil, fmt.Errorf("%w. baseline version or allow-dirty is required", cerr)
+		}
+		pending = migrations
+		if e.baselineVer != "" {
+			baseline := FilesLastIndex(migrations, func(f File) bool {
+				return f.Version() == e.baselineVer
+			})
+			if baseline == -1 {
+				return nil, fmt.Errorf("baseline version %q not found", e.baselineVer)
+			}
+			f := migrations[baseline]
+			// Mark the revision in the database as baseline revision.
+			if err := e.writeRevision(ctx, &Revision{Version: f.Version(), Description: f.Desc(), Type: RevisionTypeBaseline}); err != nil {
+				return nil, err
+			}
+			pending = migrations[baseline+1:]
+		}
+	// Not the first time we execute and a custom starting point was provided.
+	case e.fromVer != "":
+		idx := FilesLastIndex(migrations, func(f File) bool {
+			return f.Version() == e.fromVer
+		})
+		if idx == -1 {
+			return nil, fmt.Errorf("starting point version %q not found in the migration directory", e.fromVer)
+		}
+		pending = migrations[idx:]
+	default:
+		var (
+			last      = revs[len(revs)-1]
+			partially = last.Applied != last.Total
+			fn        = func(f File) bool { return f.Version() <= last.Version }
+		)
+		if partially {
+			// If the last file is partially applied, we need to find the matching migration file in order to
+			// continue execution at the correct statement.
+			fn = func(f File) bool { return f.Version() == last.Version }
+		}
+		// Consider all migration files having a version greater than the latest revision version as pending.
+		// If the last revision is partially applied, it is considered pending as well.
+		idx := FilesLastIndex(migrations, fn)
+		if idx == -1 {
+			// If we cannot find the matching migration version for a partially applied migration,
+			// error out since we cannot determine how to proceed from here.
+			if partially {
+				return nil, &MissingMigrationError{last.Version, last.Description}
+			}
+			// All migrations have a higher version than the latest revision. Take every migration file as pending.
+			return migrations, nil
+		}
+		// If this file was not partially applied, take the next one.
+		if last.Applied == last.Total {
+			idx++
+		}
+		pending = migrations[idx:]
+	}
+	if len(pending) == 0 {
+		return nil, ErrNoPendingFiles
+	}
+	return pending, nil
+}
+
+// Execute executes the given migration file on the database.
 If it sees a file that has been partially applied, it
+// will continue with the next statement in line.
+func (e *Executor) Execute(ctx context.Context, m File) (err error) {
+	hf, err := e.dir.Checksum()
+	if err != nil {
+		return fmt.Errorf("sql/migrate: execute: compute hash: %w", err)
+	}
+	hash, err := hf.SumByName(m.Name())
+	if err != nil {
+		return fmt.Errorf("sql/migrate: execute: scanning checksum from %q: %w", m.Name(), err)
+	}
+	stmts, err := m.Stmts()
+	if err != nil {
+		return fmt.Errorf("sql/migrate: execute: scanning statements from %q: %w", m.Name(), err)
+	}
+	// Create checksums for the statements.
+	var (
+		sums = make([]string, len(stmts))
+		h    = sha256.New()
+	)
+	for i, stmt := range stmts {
+		if _, err := h.Write([]byte(stmt)); err != nil {
+			return err
+		}
+		sums[i] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+	}
+	version := m.Version()
+	// If there already is a revision with this version in the database,
+	// and it is partially applied, continue where the last attempt was left off.
+	r, err := e.rrw.ReadRevision(ctx, version)
+	if err != nil && !errors.Is(err, ErrRevisionNotExist) {
+		return fmt.Errorf("sql/migrate: execute: read revision: %w", err)
+	}
+	if errors.Is(err, ErrRevisionNotExist) {
+		// Haven't seen this file before, create a new revision.
+		r = &Revision{
+			Version:     version,
+			Description: m.Desc(),
+			Type:        RevisionTypeExecute,
+			Total:       len(stmts),
+			Hash:        hash,
+		}
+	}
+	// Save once to mark as started in the database.
+	if err = e.writeRevision(ctx, r); err != nil {
+		return err
+	}
+	// Make sure to store the Revision information.
+	defer func(ctx context.Context, e *Executor, r *Revision) {
+		if err2 := e.writeRevision(ctx, r); err2 != nil {
+			err = wrap(err2, err)
+		}
+	}(ctx, e, r)
+	if r.Applied > 0 {
+		// If the file has been applied partially before, check that the
+		// already applied statements have not changed.
+		for i := 0; i < r.Applied; i++ {
+			if i >= len(sums) || sums[i] != strings.TrimPrefix(r.PartialHashes[i], "h1:") {
+				err = HistoryChangedError{m.Name(), i + 1}
+				e.log.Log(LogError{Error: err})
+				return err
+			}
+		}
+	}
+	e.log.Log(LogFile{m, r.Version, r.Description, r.Applied})
+	for _, stmt := range stmts[r.Applied:] {
+		e.log.Log(LogStmt{stmt})
+		if _, err = e.drv.ExecContext(ctx, stmt); err != nil {
+			e.log.Log(LogError{SQL: stmt, Error: err})
+			r.done()
+			r.ErrorStmt = stmt
+			r.Error = err.Error()
+			return fmt.Errorf("sql/migrate: execute: executing statement %q from version %q: %w", stmt, r.Version, err)
+		}
+		r.PartialHashes = append(r.PartialHashes, "h1:"+sums[r.Applied])
+		r.Applied++
+		if err = e.writeRevision(ctx, r); err != nil {
+			return err
+		}
+	}
+	r.done()
+	return
+}
+
+func (e *Executor) writeRevision(ctx context.Context, r *Revision) error {
+	r.ExecutedAt = time.Now()
+	r.OperatorVersion = e.operator
+	if err := e.rrw.WriteRevision(ctx, r); err != nil {
+		return fmt.Errorf("sql/migrate: execute: write revision: %w", err)
+	}
+	return nil
+}
+
+// HistoryChangedError is returned if, between two execution attempts, already
+// applied statements of a file have changed.
+type HistoryChangedError struct {
+	File string
+	Stmt int
+}
+
+func (e HistoryChangedError) Error() string {
+	return fmt.Sprintf("sql/migrate: execute: history changed: statement %d from file %q changed", e.Stmt, e.File)
+}
+
+// ExecuteN executes n pending migration files. If n<=0 all pending migration files are executed.
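+//
+// For example, applying all pending files and tolerating an empty
+// queue (sketch):
+//
+//	if err := ex.ExecuteN(ctx, 0); err != nil && !errors.Is(err, migrate.ErrNoPendingFiles) {
+//		// handle error
+//	}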
+func (e *Executor) ExecuteN(ctx context.Context, n int) (err error) {
+	pending, err := e.Pending(ctx)
+	if err != nil {
+		return err
+	}
+	if n > 0 {
+		if n >= len(pending) {
+			n = len(pending)
+		}
+		pending = pending[:n]
+	}
+	return e.exec(ctx, pending)
+}
+
+// ExecuteTo executes all pending migration files up to and including version.
+func (e *Executor) ExecuteTo(ctx context.Context, version string) (err error) {
+	pending, err := e.Pending(ctx)
+	if err != nil {
+		return err
+	}
+	// Strip pending files greater than the given version.
+	switch idx := FilesLastIndex(pending, func(file File) bool {
+		return file.Version() == version
+	}); idx {
+	case -1:
+		return fmt.Errorf("sql/migrate: execute: migration with version %q not found", version)
+	default:
+		pending = pending[:idx+1]
+	}
+	return e.exec(ctx, pending)
+}
+
+func (e *Executor) exec(ctx context.Context, files []File) error {
+	revs, err := e.rrw.ReadRevisions(ctx)
+	if err != nil {
+		return fmt.Errorf("sql/migrate: execute: read revisions: %w", err)
+	}
+	LogIntro(e.log, revs, files)
+	for _, m := range files {
+		if err := e.Execute(ctx, m); err != nil {
+			return err
+		}
+	}
+	e.log.Log(LogDone{})
+	return nil
+}
+
+type (
+	replayConfig struct {
+		version string // to which version to replay (inclusive)
+	}
+	// ReplayOption configures a migration directory replay behavior.
+	ReplayOption func(*replayConfig)
+)
+
+// ReplayToVersion configures the last version to apply when replaying the migration directory.
+func ReplayToVersion(v string) ReplayOption {
+	return func(c *replayConfig) {
+		c.version = v
+	}
+}
+
+// Replay the migration directory and invoke the state reader to get back the inspection result.
+func (e *Executor) Replay(ctx context.Context, r StateReader, opts ...ReplayOption) (_ *schema.Realm, err error) {
+	c := &replayConfig{}
+	for _, opt := range opts {
+		opt(c)
+	}
+	// Clean up after ourselves.
+	restore, err := e.drv.(Snapshoter).Snapshot(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("sql/migrate: taking database snapshot: %w", err)
+	}
+	defer func() {
+		if err2 := restore(ctx); err2 != nil {
+			err = wrap(err2, err)
+		}
+	}()
+	// Replay the migration directory on the database.
+	switch {
+	case c.version != "":
+		err = e.ExecuteTo(ctx, c.version)
+	default:
+		err = e.ExecuteN(ctx, 0)
+	}
+	if err != nil && !errors.Is(err, ErrNoPendingFiles) {
+		return nil, fmt.Errorf("sql/migrate: read migration directory state: %w", err)
+	}
+	return r.ReadState(ctx)
+}
+
+type (
+	// Snapshoter wraps the Snapshot method.
+	Snapshoter interface {
+		// Snapshot takes a snapshot of the current database state and returns a function that can be called to restore
+		// that state. Snapshot should return an error if the current state cannot be restored completely, e.g. if
+		// there is a table already containing some rows.
+		Snapshot(context.Context) (RestoreFunc, error)
+	}
+
+	// RestoreFunc is returned by the Snapshoter to explicitly restore the database state.
+	RestoreFunc func(context.Context) error
+
+	// TableIdent describes the identifier of the revisions table.
+	TableIdent struct {
+		Name   string // name of the table.
+		Schema string // optional schema.
+	}
+
+	// CleanChecker wraps the single CheckClean method.
+	CleanChecker interface {
+		// CheckClean checks if the connected realm or schema does not contain any resources besides the
+		// revision history table. A NotCleanError is returned in case the connection is not empty.
+		CheckClean(context.Context, *TableIdent) error
+	}
+
+	// NotCleanError is returned when the connected dev-db is not in a clean state (i.e. it contains schemas or tables).
+	// This check is done to ensure no data is lost by overriding it when working on the dev-db.
+	NotCleanError struct {
+		Reason string // reason why the database is considered not clean
+	}
+)
+
+func (e *NotCleanError) Error() string {
+	return "sql/migrate: connected database is not clean: " + e.Reason
+}
+
+// NopRevisionReadWriter is a RevisionReadWriter that does nothing.
+// It is useful for one-time replay of the migration directory.
+type NopRevisionReadWriter struct{}
+
+// Ident implements RevisionReadWriter.Ident.
+func (NopRevisionReadWriter) Ident() *TableIdent {
+	return nil
+}
+
+// ReadRevisions implements RevisionReadWriter.ReadRevisions.
+func (NopRevisionReadWriter) ReadRevisions(context.Context) ([]*Revision, error) {
+	return nil, nil
+}
+
+// ReadRevision implements RevisionReadWriter.ReadRevision.
+func (NopRevisionReadWriter) ReadRevision(context.Context, string) (*Revision, error) {
+	return nil, ErrRevisionNotExist
+}
+
+// WriteRevision implements RevisionReadWriter.WriteRevision.
+func (NopRevisionReadWriter) WriteRevision(context.Context, *Revision) error {
+	return nil
+}
+
+// DeleteRevision implements RevisionReadWriter.DeleteRevision.
+func (NopRevisionReadWriter) DeleteRevision(context.Context, string) error {
+	return nil
+}
+
+var _ RevisionReadWriter = (*NopRevisionReadWriter)(nil)
+
+// done computes and sets the ExecutionTime.
+func (r *Revision) done() {
+	r.ExecutionTime = time.Since(r.ExecutedAt)
+}
+
+type (
+	// A Logger logs migration execution.
+	Logger interface {
+		Log(LogEntry)
+	}
+
+	// LogEntry marks several types of logs to be passed to a Logger.
+	LogEntry interface {
+		logEntry()
+	}
+
+	// LogExecution is sent once when execution of multiple migration files has been started.
+	// It holds the filenames of the pending migration files.
+	LogExecution struct {
+		// From what version.
+		From string
+		// To what version.
+		To string
+		// Migration Files to be executed.
+		Files []File
+	}
+
+	// LogFile is sent if a new migration file is executed.
+	LogFile struct {
+		// The File being executed.
+		File File
+		// Version executed.
+		// Deprecated: Use File.Version() instead.
+		Version string
+		// Desc of migration executed.
+		// Deprecated: Use File.Desc() instead.
+		Desc string
+		// Skip holds the number of stmts of this file that will be skipped.
+		// This happens if a migration file was only partially applied and will now continue to be applied.
+		Skip int
+	}
+
+	// LogStmt is sent if a new SQL statement is executed.
+	LogStmt struct {
+		SQL string
+	}
+
+	// LogDone is sent if the execution is done.
+	LogDone struct{}
+
+	// LogError is sent if there is an error during execution.
+	LogError struct {
+		SQL   string // Set if Error was caused by a SQL statement.
+		Error error
+	}
+
+	// NopLogger is a Logger that does nothing.
+	// It is useful for one-time replay of the migration directory.
+	NopLogger struct{}
+)
+
+func (LogExecution) logEntry() {}
+func (LogFile) logEntry()      {}
+func (LogStmt) logEntry()      {}
+func (LogDone) logEntry()      {}
+func (LogError) logEntry()     {}
+
+// Log implements the Logger interface.
+func (NopLogger) Log(LogEntry) {}
+
+// LogIntro gathers some meta information from the migration files and stored
+// revisions to log some general information prior to actual execution.
+func LogIntro(l Logger, revs []*Revision, files []File) { + e := LogExecution{Files: files} + if len(revs) > 0 { + e.From = revs[len(revs)-1].Version + } + if len(files) > 0 { + e.To = files[len(files)-1].Version() + } + l.Log(e) +} + +// LogNoPendingFiles starts a new LogExecution and LogDone +// to indicate that there are no pending files to be executed. +func LogNoPendingFiles(l Logger, revs []*Revision) { + LogIntro(l, revs, nil) + l.Log(LogDone{}) +} + +func wrap(err1, err2 error) error { + if err2 != nil { + return fmt.Errorf("sql/migrate: %w: %v", err2, err1) + } + return err1 +} diff --git a/vendor/ariga.io/atlas/sql/migrate/testdata/migrate/atlas.sum b/vendor/ariga.io/atlas/sql/migrate/testdata/migrate/atlas.sum new file mode 100644 index 00000000..7660de17 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/migrate/testdata/migrate/atlas.sum @@ -0,0 +1,3 @@ +h1:M74RrNK69S2pj6C541LR1ew5O32/i0WoyNgsJmyuiUk= +1_initial.down.sql h1:0zypK43rgPbgvVUgVJABGN25VgM1QSeU+LJDBb8cEQI= +1_initial.up.sql h1:hFhs5XhRml4KTWGF5td6h1s7xNqAFnaEBbC5Y/NF7i4= diff --git a/vendor/ariga.io/atlas/sql/mysql/BUILD b/vendor/ariga.io/atlas/sql/mysql/BUILD new file mode 100644 index 00000000..e767650d --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "mysql", + srcs = [ + "convert.go", + "diff.go", + "driver.go", + "inspect.go", + "migrate.go", + "sqlspec.go", + "tidb.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/mysql", + importpath = "ariga.io/atlas/sql/mysql", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/ariga.io/atlas/sql/internal/specutil", + "//vendor/ariga.io/atlas/sql/internal/sqlx", + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion", + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/ariga.io/atlas/sql/sqlclient", + "//vendor/ariga.io/atlas/sql/sqlspec", + "//vendor/github.com/hashicorp/hcl/v2/hclparse", + "//vendor/github.com/zclconf/go-cty/cty", + ], +) diff --git a/vendor/ariga.io/atlas/sql/mysql/convert.go b/vendor/ariga.io/atlas/sql/mysql/convert.go new file mode 100644 index 00000000..1c882978 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/convert.go @@ -0,0 +1,264 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package mysql + +import ( + "fmt" + "strconv" + "strings" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/schema" +) + +// FormatType converts schema type to its column form in the database. +// An error is returned if the type cannot be recognized. +func FormatType(t schema.Type) (string, error) { + var f string + switch t := t.(type) { + case *BitType: + f = strings.ToLower(t.T) + if t.Size > 1 { + // The default size is 1. Thus, both + // BIT and BIT(1) are formatted as bit. + f += fmt.Sprintf("(%d)", t.Size) + } + case *schema.BoolType: + // Map all flavors to a single form. + switch f = strings.ToLower(t.T); f { + case TypeBool, TypeBoolean, TypeTinyInt, "tinyint(1)": + f = TypeBool + } + case *schema.BinaryType: + f = strings.ToLower(t.T) + // Accept 0 as a valid size, and avoid appending the default size of type BINARY. 
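+		// For example, varbinary with Size=255 is formatted as "varbinary(255)",
+		// while binary with the default Size=1 stays plain "binary".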
+		if f == TypeVarBinary && t.Size != nil || f == TypeBinary && t.Size != nil && *t.Size != 1 {
+			f = fmt.Sprintf("%s(%d)", f, *t.Size)
+		}
+	case *schema.DecimalType:
+		if f = strings.ToLower(t.T); f != TypeDecimal && f != TypeNumeric {
+			return "", fmt.Errorf("mysql: unexpected decimal type: %q", t.T)
+		}
+		switch p, s := t.Precision, t.Scale; {
+		case p < 0 || s < 0:
+			return "", fmt.Errorf("mysql: decimal type must have precision > 0 and scale >= 0: %d, %d", p, s)
+		case p < s:
+			return "", fmt.Errorf("mysql: decimal type must have precision >= scale: %d < %d", p, s)
+		case p == 0 && s == 0:
+			// The default value for precision is 10 (i.e. decimal(0,0) = decimal(10)).
+			p = 10
+			fallthrough
+		case s == 0:
+			// In standard SQL, the syntax DECIMAL(M) is equivalent to DECIMAL(M,0).
+			f = fmt.Sprintf("decimal(%d)", p)
+		default:
+			f = fmt.Sprintf("decimal(%d,%d)", p, s)
+		}
+		if t.Unsigned {
+			f += " unsigned"
+		}
+	case *schema.EnumType:
+		f = fmt.Sprintf("enum(%s)", formatValues(t.Values))
+	case *schema.FloatType:
+		f = strings.ToLower(t.T)
+		// FLOAT with precision > 24 becomes DOUBLE.
+		// Also, REAL is a synonym for DOUBLE (if REAL_AS_FLOAT was not set).
+		if f == TypeFloat && t.Precision > 24 || f == TypeReal {
+			f = TypeDouble
+		}
+		if t.Unsigned {
+			f += " unsigned"
+		}
+	case *schema.IntegerType:
+		f = strings.ToLower(t.T)
+		if t.Unsigned {
+			f += " unsigned"
+		}
+	case *schema.JSONType:
+		f = strings.ToLower(t.T)
+	case *SetType:
+		f = fmt.Sprintf("set(%s)", formatValues(t.Values))
+	case *schema.StringType:
+		f = strings.ToLower(t.T)
+		switch f {
+		case TypeChar:
+			// Not a single char.
+			if t.Size > 0 {
+				f += fmt.Sprintf("(%d)", t.Size)
+			}
+		case TypeVarchar:
+			// Zero is also a valid length.
+			f = fmt.Sprintf("varchar(%d)", t.Size)
+		}
+	case *schema.SpatialType:
+		f = strings.ToLower(t.T)
+	case *schema.TimeType:
+		f = strings.ToLower(t.T)
+		if p := t.Precision; p != nil && *p > 0 {
+			f = fmt.Sprintf("%s(%d)", f, *p)
+		}
+	case *schema.UUIDType:
+		f = strings.ToLower(t.T)
+	case *schema.UnsupportedType:
+		// Do not accept unsupported types as we should cover all cases.
+		return "", fmt.Errorf("unsupported type %q", t.T)
+	default:
+		return "", fmt.Errorf("invalid schema type %T", t)
+	}
+	return f, nil
+}
+
+// ParseType returns the schema.Type value represented by the given raw type.
+// The raw value is expected to follow the format in MySQL information schema.
+func ParseType(raw string) (schema.Type, error) {
+	parts, size, unsigned, err := parseColumn(raw)
+	if err != nil {
+		return nil, err
+	}
+	switch t := parts[0]; t {
+	case TypeBit:
+		return &BitType{
+			T:    t,
+			Size: size,
+		}, nil
+	// BOOL and BOOLEAN are synonyms for
+	// TINYINT with display-width set to 1.
+	case TypeBool, TypeBoolean:
+		return &schema.BoolType{
+			T: TypeBool,
+		}, nil
+	case TypeTinyInt, TypeSmallInt, TypeMediumInt, TypeInt, TypeBigInt:
+		if size == 1 {
+			return &schema.BoolType{
+				T: TypeBool,
+			}, nil
+		}
+		// For integer types, the size represents the display width and does not
+		// constrain the range of values that can be stored in the column.
+		// The storage byte-size is inferred from the type name (e.g. TINYINT takes
+		// a single byte).
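+		// For example, "int unsigned" yields an IntegerType with Unsigned set, and
+		// "int(10) unsigned zerofill" also carries DisplayWidth and ZeroFill attributes.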
+		ft := &schema.IntegerType{
+			T:        t,
+			Unsigned: unsigned,
+		}
+		if attr := parts[len(parts)-1]; attr == "zerofill" && size != 0 {
+			ft.Attrs = []schema.Attr{
+				&DisplayWidth{
+					N: size,
+				},
+				&ZeroFill{
+					A: attr,
+				},
+			}
+		}
+		return ft, nil
+	case TypeNumeric, TypeDecimal:
+		dt := &schema.DecimalType{
+			T:        t,
+			Unsigned: unsigned,
+		}
+		if len(parts) > 1 && parts[1] != "unsigned" {
+			if dt.Precision, err = strconv.Atoi(parts[1]); err != nil {
+				return nil, fmt.Errorf("parse precision %q", parts[1])
+			}
+		}
+		if len(parts) > 2 && parts[2] != "unsigned" {
+			if dt.Scale, err = strconv.Atoi(parts[2]); err != nil {
+				return nil, fmt.Errorf("parse scale %q", parts[2])
+			}
+		}
+		return dt, nil
+	case TypeFloat, TypeDouble, TypeReal:
+		ft := &schema.FloatType{
+			T:        t,
+			Unsigned: unsigned,
+		}
+		if len(parts) > 1 && parts[1] != "unsigned" {
+			if ft.Precision, err = strconv.Atoi(parts[1]); err != nil {
+				return nil, fmt.Errorf("parse precision %q", parts[1])
+			}
+		}
+		return ft, nil
+	case TypeBinary, TypeVarBinary:
+		bt := &schema.BinaryType{T: t}
+		if len(parts) > 1 {
+			bt.Size = &size
+		}
+		return bt, nil
+	case TypeTinyBlob, TypeMediumBlob, TypeBlob, TypeLongBlob:
+		return &schema.BinaryType{
+			T: t,
+		}, nil
+	case TypeChar, TypeVarchar:
+		return &schema.StringType{
+			T:    t,
+			Size: size,
+		}, nil
+	case TypeTinyText, TypeMediumText, TypeText, TypeLongText:
+		return &schema.StringType{
+			T: t,
+		}, nil
+	case TypeEnum, TypeSet:
+		// Parse the enum values according to the MySQL format.
+		// github.com/mysql/mysql-server/blob/8.0/sql/field.cc#Field_enum::sql_type
+		rv := strings.TrimSuffix(strings.TrimPrefix(raw, t+"("), ")")
+		if rv == "" {
+			return nil, fmt.Errorf("mysql: unexpected enum type: %q", raw)
+		}
+		values := strings.Split(rv, "','")
+		for i := range values {
+			values[i] = strings.Trim(values[i], "'")
+		}
+		if t == TypeEnum {
+			return &schema.EnumType{
+				T:      TypeEnum,
+				Values: values,
+			}, nil
+		}
+		return &SetType{
+			Values: values,
+		}, nil
+	case TypeDate, TypeDateTime, TypeTime, TypeTimestamp, TypeYear:
+		tt := &schema.TimeType{
+			T: t,
+		}
+		if len(parts) > 1 {
+			p, err := strconv.Atoi(parts[1])
+			if err != nil {
+				return nil, fmt.Errorf("parse precision %q", parts[1])
+			}
+			tt.Precision = &p
+		}
+		return tt, nil
+	case TypeJSON:
+		return &schema.JSONType{
+			T: t,
+		}, nil
+	case TypePoint, TypeMultiPoint, TypeLineString, TypeMultiLineString, TypePolygon, TypeMultiPolygon, TypeGeometry, TypeGeoCollection, TypeGeometryCollection:
+		return &schema.SpatialType{
+			T: t,
+		}, nil
+	case TypeUUID:
+		return &schema.UUIDType{
+			T: t,
+		}, nil
+	default:
+		return &schema.UnsupportedType{
+			T: t,
+		}, nil
+	}
+}
+
+// formatValues formats ENUM and SET values.
+func formatValues(vs []string) string {
+	values := make([]string, len(vs))
+	for i := range vs {
+		values[i] = vs[i]
+		if !sqlx.IsQuoted(values[i], '"', '\'') {
+			values[i] = "'" + values[i] + "'"
+		}
+	}
+	return strings.Join(values, ",")
+}
diff --git a/vendor/ariga.io/atlas/sql/mysql/diff.go b/vendor/ariga.io/atlas/sql/mysql/diff.go
new file mode 100644
index 00000000..cf8f5649
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/mysql/diff.go
@@ -0,0 +1,637 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package mysql
+
+import (
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/schema"
+)
+
+// DefaultDiff provides basic diffing capabilities for MySQL dialects.
+// Note, it is recommended to call Open, create a new Driver and use its
+// Differ when a database connection is available.
+var DefaultDiff schema.Differ = &sqlx.Diff{DiffDriver: &diff{conn: noConn}}
+
+// A diff provides a MySQL implementation for sqlx.DiffDriver.
+type diff struct {
+	conn
+	// Charset to collation mapping.
+	// See the internal directory.
+	ch2co, co2ch struct {
+		sync.Once
+		v   map[string]string
+		err error
+	}
+}
+
+// SchemaAttrDiff returns a changeset for migrating schema attributes from one state to the other.
+func (d *diff) SchemaAttrDiff(from, to *schema.Schema) []schema.Change {
+	var (
+		topAttr []schema.Attr
+		changes []schema.Change
+	)
+	if from.Realm != nil {
+		topAttr = from.Realm.Attrs
+	}
+	// Charset change.
+	if change := d.charsetChange(from.Attrs, topAttr, to.Attrs); change != noChange {
+		changes = append(changes, change)
+	}
+	// Collation change.
+	if change := d.collationChange(from.Attrs, topAttr, to.Attrs); change != noChange {
+		changes = append(changes, change)
+	}
+	return changes
+}
+
+// TableAttrDiff returns a changeset for migrating table attributes from one state to the other.
+func (d *diff) TableAttrDiff(from, to *schema.Table) ([]schema.Change, error) {
+	var changes []schema.Change
+	if change := d.autoIncChange(from.Attrs, to.Attrs); change != noChange {
+		changes = append(changes, change)
+	}
+	if change := sqlx.CommentDiff(from.Attrs, to.Attrs); change != nil {
+		changes = append(changes, change)
+	}
+	if change := d.charsetChange(from.Attrs, from.Schema.Attrs, to.Attrs); change != noChange {
+		changes = append(changes, change)
+	}
+	if change := d.collationChange(from.Attrs, from.Schema.Attrs, to.Attrs); change != noChange {
+		changes = append(changes, change)
+	}
+	if !d.SupportsCheck() && sqlx.Has(to.Attrs, &schema.Check{}) {
+		return nil, fmt.Errorf("version %q does not support CHECK constraints", d.V)
+	}
+	// For MariaDB, we skip JSON CHECK constraints that were created by the database,
+	// or by Atlas for older versions. These CHECK constraints (inlined on the columns)
+	// also cannot be dropped using "DROP CONSTRAINTS", but can be modified and dropped
+	// using "MODIFY COLUMN".
+	var checks []schema.Change
+	for _, c := range sqlx.CheckDiff(from, to, func(c1, c2 *schema.Check) bool {
+		return enforced(c1.Attrs) == enforced(c2.Attrs)
+	}) {
+		drop, ok := c.(*schema.DropCheck)
+		if !ok || !strings.HasPrefix(drop.C.Expr, "json_valid") {
+			checks = append(checks, c)
+			continue
+		}
+		// Generated CHECKs have the form of "json_valid(`<column>`)"
+		// and are named after the column.
+		if _, ok := to.Column(drop.C.Name); !ok {
+			checks = append(checks, c)
+		}
+	}
+	return append(changes, checks...), nil
+}
+
+// ColumnChange returns the schema changes (if any) for migrating one column to the other.
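+// The returned value is a bitmask of the detected changes, e.g.
+// schema.ChangeType|schema.ChangeNull when both the type and the
+// nullability differ.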
+func (d *diff) ColumnChange(fromT *schema.Table, from, to *schema.Column) (schema.ChangeKind, error) {
+	change := sqlx.CommentChange(from.Attrs, to.Attrs)
+	if from.Type.Null != to.Type.Null {
+		change |= schema.ChangeNull
+	}
+	changed, err := d.typeChanged(from, to)
+	if err != nil {
+		return schema.NoChange, err
+	}
+	if changed {
+		change |= schema.ChangeType
+	}
+	if changed, err = d.defaultChanged(from, to); err != nil {
+		return schema.NoChange, err
+	}
+	if changed {
+		change |= schema.ChangeDefault
+	}
+	if changed, err = d.generatedChanged(from, to); err != nil {
+		return schema.NoChange, err
+	}
+	if changed {
+		change |= schema.ChangeGenerated
+	}
+	if changed, err = d.columnCharsetChanged(fromT, from, to); err != nil {
+		return schema.NoChange, err
+	}
+	if changed {
+		change |= schema.ChangeCharset
+	}
+	if changed, err = d.columnCollateChanged(fromT, from, to); err != nil {
+		return schema.NoChange, err
+	}
+	if changed {
+		change |= schema.ChangeCollate
+	}
+	return change, nil
+}
+
+// IsGeneratedIndexName reports if the index name was generated by the database.
+func (d *diff) IsGeneratedIndexName(_ *schema.Table, idx *schema.Index) bool {
+	// Auto-generated index names for functional/expression indexes. See
+	// mysql-server/sql/sql_table.cc#add_functional_index_to_create_list
+	const f = "functional_index"
+	switch {
+	case d.SupportsIndexExpr() && idx.Name == f:
+		return true
+	case d.SupportsIndexExpr() && strings.HasPrefix(idx.Name, f+"_"):
+		i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, f+"_"), 10, 64)
+		return err == nil && i > 1
+	case len(idx.Parts) == 0 || idx.Parts[0].C == nil:
+		return false
+	}
+	// Unnamed INDEX or UNIQUE constraints are named by
+	// the first index-part (as column or part of it).
+	// For example, "c", "c_2", "c_3", etc.
+	switch name := idx.Parts[0].C.Name; {
+	case idx.Name == name:
+		return true
+	case strings.HasPrefix(idx.Name, name+"_"):
+		i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, name+"_"), 10, 64)
+		return err == nil && i > 1
+	default:
+		return false
+	}
+}
+
+// IndexAttrChanged reports if the index attributes were changed.
+func (*diff) IndexAttrChanged(from, to []schema.Attr) bool {
+	return indexType(from).T != indexType(to).T
+}
+
+// IndexPartAttrChanged reports if the index-part attributes (collation or prefix) were changed.
+func (*diff) IndexPartAttrChanged(fromI, toI *schema.Index, i int) bool {
+	var s1, s2 SubPart
+	return sqlx.Has(fromI.Parts[i].Attrs, &s1) != sqlx.Has(toI.Parts[i].Attrs, &s2) || s1.Len != s2.Len
+}
+
+// ReferenceChanged reports if the foreign key referential action was changed.
+func (*diff) ReferenceChanged(from, to schema.ReferenceOption) bool {
+	// According to MySQL docs, foreign key constraints are checked
+	// immediately, so NO ACTION is the same as RESTRICT. Specifying
+	// RESTRICT (or NO ACTION) is the same as omitting the ON DELETE
+	// or ON UPDATE clause.
+	if from == "" || from == schema.Restrict {
+		from = schema.NoAction
+	}
+	if to == "" || to == schema.Restrict {
+		to = schema.NoAction
+	}
+	return from != to
+}
+
+// Normalize implements the sqlx.Normalizer interface.
+func (d *diff) Normalize(from, to *schema.Table) error {
+	indexes := make([]*schema.Index, 0, len(from.Indexes))
+	for _, idx := range from.Indexes {
+		// MySQL requires that foreign key columns be indexed; therefore, if the child
+		// table is defined on non-indexed columns, an index is automatically created
+		// to satisfy the constraint.
+		// Hence, if no such key was defined in the desired state, the diff would
+		// recommend dropping it on migration, so we fix this by dropping it from
+		// the current state manually.
+		if _, ok := to.Index(idx.Name); ok || !keySupportsFK(from, idx) {
+			indexes = append(indexes, idx)
+		}
+	}
+	from.Indexes = indexes
+
+	// Avoid proposing changes to the table COLLATE or CHARSET
+	// in case only one of these properties is defined.
+	if err := d.defaultCollate(&to.Attrs); err != nil {
+		return err
+	}
+	return d.defaultCharset(&to.Attrs)
+}
+
+// FindTable implements the DiffDriver.TableFinder method in order to provide
+// tables lookup that respects the "lower_case_table_names" system variable.
+func (d *diff) FindTable(s *schema.Schema, name string) (*schema.Table, error) {
+	switch d.lcnames {
+	// In mode 0: tables are stored as specified, and comparisons are case-sensitive.
+	case 0:
+		t, ok := s.Table(name)
+		if !ok {
+			return nil, &schema.NotExistError{Err: fmt.Errorf("table %q was not found", name)}
+		}
+		return t, nil
+	// In mode 1: the tables are stored in lowercase, but they are still
+	// returned on inspection, because comparisons are not case-sensitive.
+	// In mode 2: the tables are stored as given but compared in lowercase.
+	// This mode is not supported on Linux-based systems.
+	case 1, 2:
+		var matches []*schema.Table
+		for _, t := range s.Tables {
+			if strings.ToLower(name) == strings.ToLower(t.Name) {
+				matches = append(matches, t)
+			}
+		}
+		switch n := len(matches); n {
+		case 0:
+			return nil, &schema.NotExistError{Err: fmt.Errorf("table %q was not found", name)}
+		case 1:
+			return matches[0], nil
+		default:
+			return nil, fmt.Errorf("%d matches found for table %q", n, name)
+		}
+	default:
+		return nil, fmt.Errorf("unsupported 'lower_case_table_names' mode: %d", d.lcnames)
+	}
+}
+
+// collationChange returns the schema change for migrating the collation if
+// it was changed and it is not the default attribute inherited from its parent.
+func (*diff) collationChange(from, top, to []schema.Attr) schema.Change {
+	var fromC, topC, toC schema.Collation
+	switch fromHas, topHas, toHas := sqlx.Has(from, &fromC), sqlx.Has(top, &topC), sqlx.Has(to, &toC); {
+	case !fromHas && !toHas:
+	case !fromHas:
+		return &schema.AddAttr{
+			A: &toC,
+		}
+	case !toHas:
+		// There is no way to DROP a COLLATE that was configured on the table
+		// and is not the default. Therefore, we use ModifyAttr and give it
+		// the inherited (and default) collation from the schema or server.
+		if topHas && fromC.V != topC.V {
+			return &schema.ModifyAttr{
+				From: &fromC,
+				To:   &topC,
+			}
+		}
+	case fromC.V != toC.V:
+		return &schema.ModifyAttr{
+			From: &fromC,
+			To:   &toC,
+		}
+	}
+	return noChange
+}
+
+// charsetChange returns the schema change for migrating the charset if
+// it was changed and it is not the default attribute inherited from its parent.
+func (*diff) charsetChange(from, top, to []schema.Attr) schema.Change {
+	var fromC, topC, toC schema.Charset
+	switch fromHas, topHas, toHas := sqlx.Has(from, &fromC), sqlx.Has(top, &topC), sqlx.Has(to, &toC); {
+	case !fromHas && !toHas:
+	case !fromHas:
+		return &schema.AddAttr{
+			A: &toC,
+		}
+	case !toHas:
+		// There is no way to DROP a CHARSET that was configured on the table
+		// and is not the default. Therefore, we use ModifyAttr and give it
+		// the inherited (and default) charset from the schema or server.
+		if topHas && fromC.V != topC.V {
+			return &schema.ModifyAttr{
+				From: &fromC,
+				To:   &topC,
+			}
+		}
+	case fromC.V != toC.V:
+		return &schema.ModifyAttr{
+			From: &fromC,
+			To:   &toC,
+		}
+	}
+	return noChange
+}
+
+// columnCharsetChanged reports if there is a change to the column charset.
+func (d *diff) columnCharsetChanged(fromT *schema.Table, from, to *schema.Column) (bool, error) {
+	if err := d.defaultCharset(&to.Attrs); err != nil {
+		return false, err
+	}
+	var (
+		fromC, topC, toC       schema.Charset
+		fromHas, topHas, toHas = sqlx.Has(from.Attrs, &fromC), sqlx.Has(fromT.Attrs, &topC), sqlx.Has(to.Attrs, &toC)
+	)
+	// Column was updated with a custom CHARSET that was dropped.
+	// Hence, we should revert to the one defined on the table.
+	return fromHas && !toHas && topHas && fromC.V != topC.V ||
+		// Custom CHARSET was added to the column and it does
+		// not match the one defined on the table.
+		!fromHas && toHas && topHas && toC.V != topC.V ||
+		// CHARSET was explicitly changed.
+		fromHas && toHas && fromC.V != toC.V, nil
+}
+
+// columnCollateChanged reports if there is a change to the column collation.
+func (d *diff) columnCollateChanged(fromT *schema.Table, from, to *schema.Column) (bool, error) {
+	if err := d.defaultCollate(&to.Attrs); err != nil {
+		return false, err
+	}
+	var (
+		fromC, topC, toC       schema.Collation
+		fromHas, topHas, toHas = sqlx.Has(from.Attrs, &fromC), sqlx.Has(fromT.Attrs, &topC), sqlx.Has(to.Attrs, &toC)
+	)
+	// Column was updated with a custom COLLATE that was dropped.
+	// Hence, we should revert to the one defined on the table.
+	return fromHas && !toHas && topHas && fromC.V != topC.V ||
+		// Custom COLLATE was added to the column and it does
+		// not match the one defined on the table.
+		!fromHas && toHas && topHas && toC.V != topC.V ||
+		// COLLATE was explicitly changed.
+		fromHas && toHas && fromC.V != toC.V, nil
+}
+
+// autoIncChange returns the schema change for changing the AUTO_INCREMENT
+// attribute in case it is not the default.
+func (*diff) autoIncChange(from, to []schema.Attr) schema.Change {
+	var fromA, toA AutoIncrement
+	switch fromHas, toHas := sqlx.Has(from, &fromA), sqlx.Has(to, &toA); {
+	// Ignore if the AUTO_INCREMENT attribute was dropped from the desired schema.
+	case fromHas && !toHas:
+	// The AUTO_INCREMENT attribute exists in the desired schema, and may not exist in
+	// the inspected one. This can happen because older versions of MySQL (< 8.0) stored
+	// the AUTO_INCREMENT counter in main memory (not persistent), and the value is reset
+	// on process restart for empty tables.
+	case toA.V > 1 && toA.V > fromA.V:
+		// Suggest a diff only if the desired value is greater than the inspected one,
+		// because this attribute cannot be maintained in the user's schema, and is
+		// used only to set the initial value.
+		return &schema.ModifyAttr{
+			From: &fromA,
+			To:   &toA,
+		}
+	}
+	return noChange
+}
+
+// indexType returns the index type from its attribute.
+// The default type is BTREE if no type was specified.
+func indexType(attr []schema.Attr) *IndexType {
+	t := &IndexType{T: IndexTypeBTree}
+	if sqlx.Has(attr, t) {
+		t.T = strings.ToUpper(t.T)
+	}
+	return t
+}
+
+// enforced returns the ENFORCED attribute for the CHECK
+// constraint. A CHECK is ENFORCED if not stated otherwise.
+func enforced(attr []schema.Attr) bool {
+	if e := (Enforced{}); sqlx.Has(attr, &e) {
+		return e.V
+	}
+	return true
+}
+
+// noChange describes a zero change.
+var noChange struct{ schema.Change }
+
+func (d *diff) typeChanged(from, to *schema.Column) (bool, error) {
+	fromT, toT := from.Type.Type, to.Type.Type
+	if fromT == nil || toT == nil {
+		return false, fmt.Errorf("mysql: missing type information for column %q", from.Name)
+	}
+	if reflect.TypeOf(fromT) != reflect.TypeOf(toT) {
+		return true, nil
+	}
+	var changed bool
+	switch fromT := fromT.(type) {
+	case *BitType, *schema.BinaryType, *schema.BoolType, *schema.DecimalType, *schema.FloatType,
+		*schema.JSONType, *schema.StringType, *schema.SpatialType, *schema.TimeType, *schema.UUIDType:
+		ft, err := FormatType(fromT)
+		if err != nil {
+			return false, err
+		}
+		tt, err := FormatType(toT)
+		if err != nil {
+			return false, err
+		}
+		changed = ft != tt
+	case *schema.EnumType:
+		toT := toT.(*schema.EnumType)
+		changed = !sqlx.ValuesEqual(fromT.Values, toT.Values)
+	case *schema.IntegerType:
+		toT := toT.(*schema.IntegerType)
+		// MySQL v8.0.19 dropped both display-width
+		// and zerofill from the information schema.
+		if d.SupportsDisplayWidth() {
+			ft, _, _, err := parseColumn(fromT.T)
+			if err != nil {
+				return false, err
+			}
+			tt, _, _, err := parseColumn(toT.T)
+			if err != nil {
+				return false, err
+			}
+			fromT.T, toT.T = ft[0], tt[0]
+		}
+		changed = fromT.T != toT.T || fromT.Unsigned != toT.Unsigned
+	case *SetType:
+		toT := toT.(*SetType)
+		changed = !sqlx.ValuesEqual(fromT.Values, toT.Values)
+	default:
+		return false, &sqlx.UnsupportedTypeError{Type: fromT}
+	}
+	return changed, nil
+}
+
+// defaultChanged reports if the default value of a column was changed.
+func (d *diff) defaultChanged(from, to *schema.Column) (bool, error) {
+	d1, ok1 := sqlx.DefaultValue(from)
+	d2, ok2 := sqlx.DefaultValue(to)
+	if ok1 != ok2 {
+		return true, nil
+	}
+	if d1 == d2 {
+		return false, nil
+	}
+	switch from.Type.Type.(type) {
+	case *schema.BinaryType:
+		a, err1 := binValue(d1)
+		b, err2 := binValue(d2)
+		if err1 != nil || err2 != nil {
+			return true, nil
+		}
+		return !equalsStringValues(a, b), nil
+	case *schema.BoolType:
+		a, err1 := boolValue(d1)
+		b, err2 := boolValue(d2)
+		if err1 == nil && err2 == nil {
+			return a != b, nil
+		}
+		return false, nil
+	case *schema.IntegerType:
+		return !d.equalIntValues(d1, d2), nil
+	case *schema.FloatType, *schema.DecimalType:
+		return !d.equalFloatValues(d1, d2), nil
+	case *schema.EnumType, *SetType, *schema.StringType:
+		return !equalsStringValues(d1, d2), nil
+	case *schema.TimeType:
+		x1 := strings.ToLower(strings.Trim(d1, "' ()"))
+		x2 := strings.ToLower(strings.Trim(d2, "' ()"))
+		return !equalsStringValues(x1, x2), nil
+	default:
+		x1 := strings.Trim(d1, "'")
+		x2 := strings.Trim(d2, "'")
+		return x1 != x2, nil
+	}
+}
+
+// generatedChanged reports if the generated expression of a column was changed.
+func (*diff) generatedChanged(from, to *schema.Column) (bool, error) {
+	var (
+		fromX, toX     schema.GeneratedExpr
+		fromHas, toHas = sqlx.Has(from.Attrs, &fromX), sqlx.Has(to.Attrs, &toX)
+	)
+	if !fromHas && !toHas || fromHas && toHas && sqlx.MayWrap(fromX.Expr) == sqlx.MayWrap(toX.Expr) && storedOrVirtual(fromX.Type) == storedOrVirtual(toX.Type) {
+		return false, nil
+	}
+	// Checking validity of the change is done
+	// by the planner (checkChangeGenerated).
+	return true, nil
+}
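+
+// A minimal sketch of how the comparison helpers below behave (illustrative,
+// not part of the vendored code). Values are unquoted and parsed as numbers;
+// floats compared as integers are truncated toward zero:
+//
+//	d.equalIntValues("'1.5'", "1")    // true: 1.5 truncates to 1
+//	d.equalIntValues("1", "2")        // false
+//	d.equalFloatValues("1.50", "1.5") // true
+
+// equalIntValues reports if the two integer default values are effectively
+// equal. Note that default expressions are not supported at the moment.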
+func (d *diff) equalIntValues(x1, x2 string) bool {
+	x1 = strings.ToLower(strings.Trim(x1, "' "))
+	x2 = strings.ToLower(strings.Trim(x2, "' "))
+	if x1 == x2 {
+		return true
+	}
+	d1, err := strconv.ParseInt(x1, 10, 64)
+	if err != nil {
+		// Numbers are rounded down to their nearest integer.
+		f, err := strconv.ParseFloat(x1, 64)
+		if err != nil {
+			return false
+		}
+		d1 = int64(f)
+	}
+	d2, err := strconv.ParseInt(x2, 10, 64)
+	if err != nil {
+		// Numbers are rounded down to their nearest integer.
+		f, err := strconv.ParseFloat(x2, 64)
+		if err != nil {
+			return false
+		}
+		d2 = int64(f)
+	}
+	return d1 == d2
+}
+
+// equalFloatValues reports if the two float default values are effectively
+// equal. Note that default expressions are not supported at the moment.
+func (d *diff) equalFloatValues(x1, x2 string) bool {
+	x1 = strings.ToLower(strings.Trim(x1, "' "))
+	x2 = strings.ToLower(strings.Trim(x2, "' "))
+	if x1 == x2 {
+		return true
+	}
+	d1, err := strconv.ParseFloat(x1, 64)
+	if err != nil {
+		return false
+	}
+	d2, err := strconv.ParseFloat(x2, 64)
+	if err != nil {
+		return false
+	}
+	return d1 == d2
+}
+
+// equalsStringValues reports if the two string default values are
+// equal after dropping their quotes.
+func equalsStringValues(x1, x2 string) bool {
+	a, err1 := sqlx.Unquote(x1)
+	b, err2 := sqlx.Unquote(x2)
+	return a == b && err1 == nil && err2 == nil
+}
+
+// boolValue returns the MySQL boolean value from the given string (if it is known).
+func boolValue(x string) (bool, error) {
+	switch x {
+	case "1", "'1'", "TRUE", "true":
+		return true, nil
+	case "0", "'0'", "FALSE", "false":
+		return false, nil
+	default:
+		return false, fmt.Errorf("mysql: unknown value: %q", x)
+	}
+}
+
+// binValue returns the MySQL binary value from the given string (if it is known).
+func binValue(x string) (string, error) {
+	if !isHex(x) {
+		return x, nil
+	}
+	d, err := hex.DecodeString(x[2:])
+	if err != nil {
+		return x, err
+	}
+	return string(d), nil
+}
+
+// keySupportsFK reports if the index key was created automatically by MySQL
+// to support the constraint. See sql/sql_table.cc#find_fk_supporting_key.
+func keySupportsFK(t *schema.Table, idx *schema.Index) bool {
+	if _, ok := t.ForeignKey(idx.Name); ok {
+		return true
+	}
+search:
+	for _, fk := range t.ForeignKeys {
+		if len(fk.Columns) != len(idx.Parts) {
+			continue
+		}
+		for i, c := range fk.Columns {
+			if idx.Parts[i].C == nil || idx.Parts[i].C.Name != c.Name {
+				continue search
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// defaultCollate appends the default COLLATE to the attributes in case a
+// custom character-set was defined for the element and the COLLATE was not.
+func (d *diff) defaultCollate(attrs *[]schema.Attr) error {
+	var charset schema.Charset
+	if !sqlx.Has(*attrs, &charset) || sqlx.Has(*attrs, &schema.Collation{}) {
+		return nil
+	}
+	d.ch2co.Do(func() {
+		d.ch2co.v, d.ch2co.err = d.CharsetToCollate()
+	})
+	if d.ch2co.err != nil {
+		return d.ch2co.err
+	}
+	v, ok := d.ch2co.v[charset.V]
+	if !ok {
+		return fmt.Errorf("mysql: unknown character set: %q", charset.V)
+	}
+	schema.ReplaceOrAppend(attrs, &schema.Collation{V: v})
+	return nil
+}
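+
+// For example (illustrative, not part of the vendored code): given attributes
+// that carry only a charset, defaultCollate above fills in the default
+// collation from the embedded charset-to-collation mapping (e.g. utf8mb4 =>
+// utf8mb4_0900_ai_ci on MySQL 8):
+//
+//	attrs := []schema.Attr{&schema.Charset{V: "utf8mb4"}}
+//	_ = d.defaultCollate(&attrs) // attrs now also holds &schema.Collation{V: "utf8mb4_0900_ai_ci"}
+
+// defaultCharset appends the default CHARSET to the attributes in case a
+// custom collation was defined for the element and the CHARSET was not.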
+func (d *diff) defaultCharset(attrs *[]schema.Attr) error {
+	var collate schema.Collation
+	if !sqlx.Has(*attrs, &collate) || sqlx.Has(*attrs, &schema.Charset{}) {
+		return nil
+	}
+	d.co2ch.Do(func() {
+		d.co2ch.v, d.co2ch.err = d.CollateToCharset()
+	})
+	if d.co2ch.err != nil {
+		return d.co2ch.err
+	}
+	v, ok := d.co2ch.v[collate.V]
+	if !ok {
+		return fmt.Errorf("mysql: unknown collation: %q", collate.V)
+	}
+	schema.ReplaceOrAppend(attrs, &schema.Charset{V: v})
+	return nil
+}
diff --git a/vendor/ariga.io/atlas/sql/mysql/driver.go b/vendor/ariga.io/atlas/sql/mysql/driver.go
new file mode 100644
index 00000000..eb76c16d
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/mysql/driver.go
@@ -0,0 +1,373 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package mysql
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/migrate"
+	"ariga.io/atlas/sql/mysql/internal/mysqlversion"
+	"ariga.io/atlas/sql/schema"
+	"ariga.io/atlas/sql/sqlclient"
+)
+
+type (
+	// Driver represents a MySQL driver for introspecting database schemas,
+	// generating diffs between schema elements, and applying migration changes.
+	Driver struct {
+		conn
+		schema.Differ
+		schema.Inspector
+		migrate.PlanApplier
+	}
+
+	// conn represents a database connection and its information.
+	conn struct {
+		schema.ExecQuerier
+		// System variables that are set on `Open`.
+		mysqlversion.V
+		collate string
+		charset string
+		lcnames int
+	}
+)
+
+// DriverName holds the name used for registration.
+const DriverName = "mysql"
+
+func init() {
+	sqlclient.Register(
+		DriverName,
+		sqlclient.DriverOpener(Open),
+		sqlclient.RegisterCodec(MarshalHCL, EvalHCL),
+		sqlclient.RegisterFlavours("mysql+unix", "maria", "maria+unix", "mariadb", "mariadb+unix"),
+		sqlclient.RegisterURLParser(parser{}),
+	)
+}
+
+// Open opens a new MySQL driver.
+func Open(db schema.ExecQuerier) (migrate.Driver, error) {
+	c := conn{ExecQuerier: db}
+	rows, err := db.QueryContext(context.Background(), variablesQuery)
+	if err != nil {
+		return nil, fmt.Errorf("mysql: query system variables: %w", err)
+	}
+	if err := sqlx.ScanOne(rows, &c.V, &c.collate, &c.charset, &c.lcnames); err != nil {
+		return nil, fmt.Errorf("mysql: scan system variables: %w", err)
+	}
+	if c.TiDB() {
+		return &Driver{
+			conn:        c,
+			Differ:      &sqlx.Diff{DiffDriver: &tdiff{diff{conn: c}}},
+			Inspector:   &tinspect{inspect{c}},
+			PlanApplier: &tplanApply{planApply{c}},
+		}, nil
+	}
+	return &Driver{
+		conn:        c,
+		Differ:      &sqlx.Diff{DiffDriver: &diff{conn: c}},
+		Inspector:   &inspect{c},
+		PlanApplier: &planApply{c},
+	}, nil
+}
+
+func (d *Driver) dev() *sqlx.DevDriver {
+	return &sqlx.DevDriver{Driver: d, MaxNameLen: 64}
+}
+
+// NormalizeRealm returns the normal representation of the given database.
+func (d *Driver) NormalizeRealm(ctx context.Context, r *schema.Realm) (*schema.Realm, error) {
+	return d.dev().NormalizeRealm(ctx, r)
+}
+
+// NormalizeSchema returns the normal representation of the given database.
+func (d *Driver) NormalizeSchema(ctx context.Context, s *schema.Schema) (*schema.Schema, error) {
+	return d.dev().NormalizeSchema(ctx, s)
+}
+
+// Lock implements the schema.Locker interface.
+func (d *Driver) Lock(ctx context.Context, name string, timeout time.Duration) (schema.UnlockFunc, error) {
+	conn, err := sqlx.SingleConn(ctx, d.ExecQuerier)
+	if err != nil {
+		return nil, err
+	}
+	if err := acquire(ctx, conn, name, timeout); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return func() error {
+		defer conn.Close()
+		rows, err := conn.QueryContext(ctx, "SELECT RELEASE_LOCK(?)", name)
+		if err != nil {
+			return err
+		}
+		switch released, err := sqlx.ScanNullBool(rows); {
+		case err != nil:
+			return err
+		case !released.Valid || !released.Bool:
+			return fmt.Errorf("sql/mysql: failed releasing a named lock %q", name)
+		}
+		return nil
+	}, nil
+}
+
+// Snapshot implements migrate.Snapshoter.
+func (d *Driver) Snapshot(ctx context.Context) (migrate.RestoreFunc, error) {
+	// If the connection is bound to a schema, we can restore the state if the schema has no tables.
+	s, err := d.InspectSchema(ctx, "", nil)
+	if err != nil && !schema.IsNotExistError(err) {
+		return nil, err
+	}
+	// If a schema was found, it has to have no tables attached to be considered clean.
+	if s != nil {
+		if len(s.Tables) > 0 {
+			return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in schema %q", s.Tables[0].Name, s.Name)}
+		}
+		return func(ctx context.Context) error {
+			current, err := d.InspectSchema(ctx, s.Name, nil)
+			if err != nil {
+				return err
+			}
+			changes, err := d.SchemaDiff(current, s)
+			if err != nil {
+				return err
+			}
+			return d.ApplyChanges(ctx, changes)
+		}, nil
+	}
+	// Otherwise, the database cannot have any schemas.
+	realm, err := d.InspectRealm(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(realm.Schemas) > 0 {
+		return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found schema %q", realm.Schemas[0].Name)}
+	}
+	return func(ctx context.Context) error {
+		current, err := d.InspectRealm(ctx, nil)
+		if err != nil {
+			return err
+		}
+		changes, err := d.RealmDiff(current, realm)
+		if err != nil {
+			return err
+		}
+		return d.ApplyChanges(ctx, changes)
+	}, nil
+}
+
+// CheckClean implements migrate.CleanChecker.
+func (d *Driver) CheckClean(ctx context.Context, revT *migrate.TableIdent) error {
+	if revT == nil { // accept nil values
+		revT = &migrate.TableIdent{}
+	}
+	s, err := d.InspectSchema(ctx, "", nil)
+	if err != nil && !schema.IsNotExistError(err) {
+		return err
+	}
+	if s != nil {
+		if len(s.Tables) == 0 || (revT.Schema == "" || s.Name == revT.Schema) && len(s.Tables) == 1 && s.Tables[0].Name == revT.Name {
+			return nil
+		}
+		return &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in schema %q", s.Tables[0].Name, s.Name)}
+	}
+	r, err := d.InspectRealm(ctx, nil)
+	if err != nil {
+		return err
+	}
+	switch n := len(r.Schemas); {
+	case n > 1:
+		return &migrate.NotCleanError{Reason: fmt.Sprintf("found multiple schemas: %d", len(r.Schemas))}
+	case n == 1 && r.Schemas[0].Name != revT.Schema:
+		return &migrate.NotCleanError{Reason: fmt.Sprintf("found schema %q", r.Schemas[0].Name)}
+	case n == 1 && len(r.Schemas[0].Tables) > 1:
+		return &migrate.NotCleanError{Reason: fmt.Sprintf("found multiple tables: %d", len(r.Schemas[0].Tables))}
+	case n == 1 && len(r.Schemas[0].Tables) == 1 && r.Schemas[0].Tables[0].Name != revT.Name:
+		return &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q", r.Schemas[0].Tables[0].Name)}
+	}
+	return nil
+}
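+
+// A usage sketch for the Lock method above (illustrative, not part of the
+// vendored code; drv and the error handling are placeholders). The lock name
+// is arbitrary and the timeout maps to GET_LOCK's seconds argument:
+//
+//	unlock, err := drv.Lock(ctx, "atlas_migrate", 10*time.Second)
+//	if err != nil {
+//		return err // e.g. schema.ErrLocked if held by another connection
+//	}
+//	defer unlock()
+
+// Version returns the version of the connected database.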
+func (d *Driver) Version() string {
+	return string(d.conn.V)
+}
+
+func acquire(ctx context.Context, conn schema.ExecQuerier, name string, timeout time.Duration) error {
+	rows, err := conn.QueryContext(ctx, "SELECT GET_LOCK(?, ?)", name, int(timeout.Seconds()))
+	if err != nil {
+		return err
+	}
+	switch acquired, err := sqlx.ScanNullBool(rows); {
+	case err != nil:
+		return err
+	case !acquired.Valid:
+		// NULL is returned in case of an unexpected internal error.
+		return fmt.Errorf("sql/mysql: unexpected internal error on Lock(%q, %s)", name, timeout)
+	case !acquired.Bool:
+		return schema.ErrLocked
+	}
+	return nil
+}
+
+// unescape unescapes strings with backslashes returned
+// for SQL expressions from the information schema.
+func unescape(s string) string {
+	var b strings.Builder
+	for i := 0; i < len(s); i++ {
+		switch c := s[i]; {
+		case c != '\\' || i == len(s)-1:
+			b.WriteByte(c)
+		case s[i+1] == '\'', s[i+1] == '\\':
+			b.WriteByte(s[i+1])
+			i++
+		}
+	}
+	return b.String()
+}
+
+type parser struct{}
+
+// ParseURL implements the sqlclient.URLParser interface.
+func (parser) ParseURL(u *url.URL) *sqlclient.URL {
+	v := u.Query()
+	v.Set("parseTime", "true")
+	u.RawQuery = v.Encode()
+	return &sqlclient.URL{URL: u, DSN: dsn(u), Schema: strings.TrimPrefix(u.Path, "/")}
+}
+
+// ChangeSchema implements the sqlclient.SchemaChanger interface.
+func (parser) ChangeSchema(u *url.URL, s string) *url.URL {
+	nu := *u
+	nu.Path = "/" + s
+	return &nu
+}
+
+// dsn returns the MySQL standard DSN for opening
+// the sql.DB from the user-provided URL.
+func dsn(u *url.URL) string {
+	var (
+		b      strings.Builder
+		values = u.Query()
+	)
+	b.WriteString(u.User.Username())
+	if p, ok := u.User.Password(); ok {
+		b.WriteByte(':')
+		b.WriteString(p)
+	}
+	if b.Len() > 0 {
+		b.WriteByte('@')
+	}
+	switch {
+	case strings.HasSuffix(u.Scheme, "+unix"):
+		b.WriteString("unix(")
+		// The path is always absolute, and
+		// therefore the host should be empty.
+		b.WriteString(u.Path)
+		b.WriteString(")/")
+		if name := values.Get("database"); name != "" {
+			b.WriteString(name)
+			values.Del("database")
+		}
+	default:
+		if u.Host != "" {
+			b.WriteString("tcp(")
+			b.WriteString(u.Host)
+			b.WriteByte(')')
+		}
+		if u.Path != "" {
+			b.WriteString(u.Path)
+		} else {
+			b.WriteByte('/')
+		}
+	}
+	if p := values.Encode(); p != "" {
+		b.WriteByte('?')
+		b.WriteString(p)
+	}
+	return b.String()
+}
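+
+// For example (illustrative, not part of the vendored code): a URL such as
+//
+//	mysql://user:pass@localhost:3306/test
+//
+// is converted by ParseURL/dsn above to the go-sql-driver/mysql DSN
+//
+//	user:pass@tcp(localhost:3306)/test?parseTime=true
+//
+// where parseTime=true is injected by ParseURL.
+
+// MySQL standard column types as defined in its codebase. Name and order
+// are organized differently than in MySQL.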
+//
+// https://github.com/mysql/mysql-server/blob/8.0/include/field_types.h
+// https://github.com/mysql/mysql-server/blob/8.0/sql/dd/types/column.h
+// https://github.com/mysql/mysql-server/blob/8.0/sql/sql_show.cc
+// https://github.com/mysql/mysql-server/blob/8.0/sql/gis/geometries.cc
+// https://dev.mysql.com/doc/refman/8.0/en/other-vendor-data-types.html
+const (
+	TypeBool    = "bool"
+	TypeBoolean = "boolean"
+
+	TypeBit       = "bit"       // MYSQL_TYPE_BIT
+	TypeInt       = "int"       // MYSQL_TYPE_LONG
+	TypeTinyInt   = "tinyint"   // MYSQL_TYPE_TINY
+	TypeSmallInt  = "smallint"  // MYSQL_TYPE_SHORT
+	TypeMediumInt = "mediumint" // MYSQL_TYPE_INT24
+	TypeBigInt    = "bigint"    // MYSQL_TYPE_LONGLONG
+
+	TypeDecimal = "decimal" // MYSQL_TYPE_DECIMAL
+	TypeNumeric = "numeric" // MYSQL_TYPE_DECIMAL (numeric_type rule in sql_yacc.yy)
+	TypeFloat   = "float"   // MYSQL_TYPE_FLOAT
+	TypeDouble  = "double"  // MYSQL_TYPE_DOUBLE
+	TypeReal    = "real"    // MYSQL_TYPE_FLOAT or MYSQL_TYPE_DOUBLE (real_type in sql_yacc.yy)
+
+	TypeTimestamp = "timestamp" // MYSQL_TYPE_TIMESTAMP
+	TypeDate      = "date"      // MYSQL_TYPE_DATE
+	TypeTime      = "time"      // MYSQL_TYPE_TIME
+	TypeDateTime  = "datetime"  // MYSQL_TYPE_DATETIME
+	TypeYear      = "year"      // MYSQL_TYPE_YEAR
+
+	TypeVarchar    = "varchar"    // MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VARCHAR
+	TypeChar       = "char"       // MYSQL_TYPE_STRING
+	TypeVarBinary  = "varbinary"  // MYSQL_TYPE_VAR_STRING + NULL CHARACTER_SET.
+	TypeBinary     = "binary"     // MYSQL_TYPE_STRING + NULL CHARACTER_SET.
+	TypeBlob       = "blob"       // MYSQL_TYPE_BLOB
+	TypeTinyBlob   = "tinyblob"   // MYSQL_TYPE_TINYBLOB
+	TypeMediumBlob = "mediumblob" // MYSQL_TYPE_MEDIUM_BLOB
+	TypeLongBlob   = "longblob"   // MYSQL_TYPE_LONG_BLOB
+	TypeText       = "text"       // MYSQL_TYPE_BLOB + CHARACTER_SET utf8mb4
+	TypeTinyText   = "tinytext"   // MYSQL_TYPE_TINYBLOB + CHARACTER_SET utf8mb4
+	TypeMediumText = "mediumtext" // MYSQL_TYPE_MEDIUM_BLOB + CHARACTER_SET utf8mb4
+	TypeLongText   = "longtext"   // MYSQL_TYPE_LONG_BLOB + CHARACTER_SET utf8mb4
+
+	TypeEnum = "enum" // MYSQL_TYPE_ENUM
+	TypeSet  = "set"  // MYSQL_TYPE_SET
+	TypeJSON = "json" // MYSQL_TYPE_JSON
+
+	TypeGeometry           = "geometry"           // MYSQL_TYPE_GEOMETRY
+	TypePoint              = "point"              // Geometry_type::kPoint
+	TypeMultiPoint         = "multipoint"         // Geometry_type::kMultipoint
+	TypeLineString         = "linestring"         // Geometry_type::kLinestring
+	TypeMultiLineString    = "multilinestring"    // Geometry_type::kMultilinestring
+	TypePolygon            = "polygon"            // Geometry_type::kPolygon
+	TypeMultiPolygon       = "multipolygon"       // Geometry_type::kMultipolygon
+	TypeGeoCollection      = "geomcollection"     // Geometry_type::kGeometrycollection
+	TypeGeometryCollection = "geometrycollection" // Geometry_type::kGeometrycollection
+
+	TypeUUID = "uuid" // MariaDB-supported uuid type from 10.7.0+
+)
+
+// Additional common constants in MySQL.
+const (
+	IndexTypeBTree    = "BTREE"
+	IndexTypeHash     = "HASH"
+	IndexTypeFullText = "FULLTEXT"
+	IndexTypeSpatial  = "SPATIAL"
+
+	currentTS     = "current_timestamp"
+	defaultGen    = "default_generated"
+	autoIncrement = "auto_increment"
+
+	virtual    = "VIRTUAL"
+	stored     = "STORED"
+	persistent = "PERSISTENT"
+)
diff --git a/vendor/ariga.io/atlas/sql/mysql/inspect.go b/vendor/ariga.io/atlas/sql/mysql/inspect.go
new file mode 100644
index 00000000..73986822
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/mysql/inspect.go
@@ -0,0 +1,882 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package mysql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/schema"
+)
+
+// An inspect provides a MySQL implementation for schema.Inspector.
+type inspect struct{ conn }
+
+var _ schema.Inspector = (*inspect)(nil)
+
+// InspectRealm returns schema descriptions of all resources in the given realm.
+func (i *inspect) InspectRealm(ctx context.Context, opts *schema.InspectRealmOption) (*schema.Realm, error) {
+	schemas, err := i.schemas(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+	if opts == nil {
+		opts = &schema.InspectRealmOption{}
+	}
+	r := schema.NewRealm(schemas...).SetCharset(i.charset).SetCollation(i.collate)
+	if len(schemas) == 0 || !sqlx.ModeInspectRealm(opts).Is(schema.InspectTables) {
+		return r, nil
+	}
+	if err := i.inspectTables(ctx, r, nil); err != nil {
+		return nil, err
+	}
+	sqlx.LinkSchemaTables(schemas)
+	return sqlx.ExcludeRealm(r, opts.Exclude)
+}
+
+// InspectSchema returns schema descriptions of the tables in the given schema.
+// If the schema name is empty, the result will be the attached schema.
+func (i *inspect) InspectSchema(ctx context.Context, name string, opts *schema.InspectOptions) (*schema.Schema, error) {
+	schemas, err := i.schemas(ctx, &schema.InspectRealmOption{Schemas: []string{name}})
+	if err != nil {
+		return nil, err
+	}
+	switch n := len(schemas); {
+	case n == 0:
+		return nil, &schema.NotExistError{Err: fmt.Errorf("mysql: schema %q was not found", name)}
+	case n > 1:
+		return nil, fmt.Errorf("mysql: %d schemas were found for %q", n, name)
+	}
+	if opts == nil {
+		opts = &schema.InspectOptions{}
+	}
+	r := schema.NewRealm(schemas...).SetCharset(i.charset).SetCollation(i.collate)
+	if sqlx.ModeInspectSchema(opts).Is(schema.InspectTables) {
+		if err := i.inspectTables(ctx, r, opts); err != nil {
+			return nil, err
+		}
+		sqlx.LinkSchemaTables(schemas)
+	}
+	return sqlx.ExcludeSchema(r.Schemas[0], opts.Exclude)
+}
+
+func (i *inspect) inspectTables(ctx context.Context, r *schema.Realm, opts *schema.InspectOptions) error {
+	if err := i.tables(ctx, r, opts); err != nil {
+		return err
+	}
+	for _, s := range r.Schemas {
+		if len(s.Tables) == 0 {
+			continue
+		}
+		if err := i.columns(ctx, s); err != nil {
+			return err
+		}
+		if err := i.indexes(ctx, s); err != nil {
+			return err
+		}
+		if err := i.fks(ctx, s); err != nil {
+			return err
+		}
+		if err := i.checks(ctx, s); err != nil {
+			return err
+		}
+		if err := i.showCreate(ctx, s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// schemas returns the list of the schemas in the database.
+func (i *inspect) schemas(ctx context.Context, opts *schema.InspectRealmOption) ([]*schema.Schema, error) {
+	var (
+		args  []any
+		query = schemasQuery
+	)
+	if opts != nil {
+		switch n := len(opts.Schemas); {
+		case n == 1 && opts.Schemas[0] == "":
+			query = fmt.Sprintf(schemasQueryArgs, "= SCHEMA()")
+		case n == 1 && opts.Schemas[0] != "":
+			query = fmt.Sprintf(schemasQueryArgs, "= ?")
+			args = append(args, opts.Schemas[0])
+		case n > 0:
+			query = fmt.Sprintf(schemasQueryArgs, "IN ("+nArgs(len(opts.Schemas))+")")
+			for _, s := range opts.Schemas {
+				args = append(args, s)
+			}
+		}
+	}
+	rows, err := i.QueryContext(ctx, query, args...)
+ if err != nil { + return nil, fmt.Errorf("mysql: querying schemas: %w", err) + } + defer rows.Close() + var schemas []*schema.Schema + for rows.Next() { + var name, charset, collation string + if err := rows.Scan(&name, &charset, &collation); err != nil { + return nil, err + } + schemas = append(schemas, &schema.Schema{ + Name: name, + Attrs: []schema.Attr{ + &schema.Charset{ + V: charset, + }, + &schema.Collation{ + V: collation, + }, + }, + }) + } + return schemas, nil +} + +func (i *inspect) tables(ctx context.Context, realm *schema.Realm, opts *schema.InspectOptions) error { + var ( + args []any + query = fmt.Sprintf(tablesQuery, nArgs(len(realm.Schemas))) + ) + for _, s := range realm.Schemas { + args = append(args, s.Name) + } + if opts != nil && len(opts.Tables) > 0 { + for _, t := range opts.Tables { + args = append(args, t) + } + query = fmt.Sprintf(tablesQueryArgs, nArgs(len(realm.Schemas)), nArgs(len(opts.Tables))) + } + rows, err := i.QueryContext(ctx, query, args...) + if err != nil { + return err + } + defer rows.Close() + for rows.Next() { + var ( + autoinc sql.NullInt64 + tSchema, name, charset, collation, comment, options sql.NullString + ) + if err := rows.Scan(&tSchema, &name, &charset, &collation, &autoinc, &comment, &options); err != nil { + return fmt.Errorf("scan table information: %w", err) + } + if !sqlx.ValidString(tSchema) || !sqlx.ValidString(name) { + return fmt.Errorf("invalid schema or table name: %q.%q", tSchema.String, name.String) + } + s, ok := realm.Schema(tSchema.String) + if !ok { + return fmt.Errorf("schema %q was not found in realm", tSchema.String) + } + t := &schema.Table{Name: name.String} + s.AddTables(t) + if sqlx.ValidString(charset) { + t.Attrs = append(t.Attrs, &schema.Charset{ + V: charset.String, + }) + } + if sqlx.ValidString(collation) { + t.Attrs = append(t.Attrs, &schema.Collation{ + V: collation.String, + }) + } + if sqlx.ValidString(comment) { + t.Attrs = append(t.Attrs, &schema.Comment{ + Text: comment.String, + }) + } + if sqlx.ValidString(options) { + t.Attrs = append(t.Attrs, &CreateOptions{ + V: options.String, + }) + } + if autoinc.Valid { + t.Attrs = append(t.Attrs, &AutoIncrement{ + V: autoinc.Int64, + }) + } + } + return rows.Close() +} + +// columns queries and appends the columns of the given table. +func (i *inspect) columns(ctx context.Context, s *schema.Schema) error { + query := columnsQuery + if i.SupportsGeneratedColumns() { + query = columnsExprQuery + } + rows, err := i.querySchema(ctx, query, s) + if err != nil { + return fmt.Errorf("mysql: query schema %q columns: %w", s.Name, err) + } + defer rows.Close() + for rows.Next() { + if err := i.addColumn(s, rows); err != nil { + return fmt.Errorf("mysql: %w", err) + } + } + return rows.Err() +} + +// addColumn scans the current row and adds a new column from it to the table. 
+func (i *inspect) addColumn(s *schema.Schema, rows *sql.Rows) error { + var table, name, typ, comment, nullable, key, defaults, extra, charset, collation, expr sql.NullString + if err := rows.Scan(&table, &name, &typ, &comment, &nullable, &key, &defaults, &extra, &charset, &collation, &expr); err != nil { + return err + } + t, ok := s.Table(table.String) + if !ok { + return fmt.Errorf("table %q was not found in schema", table.String) + } + c := &schema.Column{ + Name: name.String, + Type: &schema.ColumnType{ + Raw: typ.String, + Null: nullable.String == "YES", + }, + } + ct, err := ParseType(c.Type.Raw) + if err != nil { + return err + } + c.Type.Type = ct + attr, err := parseExtra(extra.String) + if err != nil { + return err + } + if attr.autoinc { + a := &AutoIncrement{} + if !sqlx.Has(t.Attrs, a) { + // A table can have only one AUTO_INCREMENT column. If it was returned as NULL + // from INFORMATION_SCHEMA, it is due to information_schema_stats_expiry, and + // we need to extract it from the 'CREATE TABLE' command. + putShow(t).auto = a + } + c.Attrs = append(c.Attrs, a) + } + if attr.onUpdate != "" { + c.Attrs = append(c.Attrs, &OnUpdate{A: attr.onUpdate}) + } + if x := expr.String; x != "" { + if !i.Maria() { + x = unescape(x) + } + c.SetGeneratedExpr(&schema.GeneratedExpr{Expr: x, Type: attr.generatedType}) + } + if defaults.Valid { + if i.Maria() { + c.Default = i.marDefaultExpr(c, defaults.String) + } else { + c.Default = i.myDefaultExpr(c, defaults.String, attr) + } + } + if sqlx.ValidString(comment) { + c.SetComment(comment.String) + } + if sqlx.ValidString(charset) { + c.SetCharset(charset.String) + } + if sqlx.ValidString(collation) { + c.SetCollation(collation.String) + } + t.AddColumns(c) + // From MySQL doc: A UNIQUE index may be displayed as "PRI" if it is NOT NULL + // and there is no PRIMARY KEY in the table. We detect this in `addIndexes`. + if key.String == "PRI" { + if t.PrimaryKey == nil { + t.PrimaryKey = &schema.Index{Table: t, Name: key.String} + } + t.PrimaryKey.Parts = append(t.PrimaryKey.Parts, &schema.IndexPart{ + C: c, + SeqNo: len(t.PrimaryKey.Parts), + }) + } + return nil +} + +// indexes queries and appends the indexes of the given table. +func (i *inspect) indexes(ctx context.Context, s *schema.Schema) error { + query := i.indexQuery() + rows, err := i.querySchema(ctx, query, s) + if err != nil { + return fmt.Errorf("mysql: query schema %q indexes: %w", s.Name, err) + } + defer rows.Close() + if err := i.addIndexes(s, rows); err != nil { + return err + } + return rows.Err() +} + +// addIndexes scans the rows and adds the indexes to the table. +func (i *inspect) addIndexes(s *schema.Schema, rows *sql.Rows) error { + hasPK := make(map[*schema.Table]bool) + for rows.Next() { + var ( + seqno int + table, name, indexType string + nonuniq, desc sql.NullBool + column, subPart, expr, comment sql.NullString + ) + if err := rows.Scan(&table, &name, &column, &nonuniq, &seqno, &indexType, &desc, &comment, &subPart, &expr); err != nil { + return fmt.Errorf("mysql: scanning indexes for schema %q: %w", s.Name, err) + } + t, ok := s.Table(table) + if !ok { + return fmt.Errorf("table %q was not found in schema", table) + } + // Ignore primary keys. 
+ if name == "PRIMARY" { + hasPK[t] = true + continue + } + idx, ok := t.Index(name) + if !ok { + idx = &schema.Index{ + Name: name, + Unique: !nonuniq.Bool, + Table: t, + Attrs: []schema.Attr{ + &IndexType{T: indexType}, + }, + } + if sqlx.ValidString(comment) { + idx.Attrs = append(t.Attrs, &schema.Comment{ + Text: comment.String, + }) + } + t.Indexes = append(t.Indexes, idx) + } + // Rows are ordered by SEQ_IN_INDEX that specifies the + // position of the column in the index definition. + part := &schema.IndexPart{SeqNo: seqno, Desc: desc.Bool} + switch { + case sqlx.ValidString(expr): + part.X = &schema.RawExpr{X: unescape(expr.String)} + case sqlx.ValidString(column): + part.C, ok = t.Column(column.String) + if !ok { + return fmt.Errorf("mysql: column %q was not found for index %q", column.String, idx.Name) + } + if sqlx.ValidString(subPart) { + n, err := strconv.Atoi(subPart.String) + if err != nil { + return fmt.Errorf("mysql: parse index prefix size %q: %w", subPart.String, err) + } + part.Attrs = append(part.Attrs, &SubPart{ + Len: n, + }) + } + part.C.Indexes = append(part.C.Indexes, idx) + default: + return fmt.Errorf("mysql: invalid part for index %q", idx.Name) + } + idx.Parts = append(idx.Parts, part) + } + for _, t := range s.Tables { + if !hasPK[t] && t.PrimaryKey != nil { + t.PrimaryKey = nil + } + } + return nil +} + +// fks queries and appends the foreign keys of the given table. +func (i *inspect) fks(ctx context.Context, s *schema.Schema) error { + rows, err := i.querySchema(ctx, fksQuery, s) + if err != nil { + return fmt.Errorf("mysql: querying %q foreign keys: %w", s.Name, err) + } + defer rows.Close() + if err := sqlx.SchemaFKs(s, rows); err != nil { + return fmt.Errorf("mysql: %w", err) + } + return rows.Err() +} + +// checks queries and appends the check constraints of the given table. +func (i *inspect) checks(ctx context.Context, s *schema.Schema) error { + query, ok := i.supportsCheck() + if !ok { + return nil + } + rows, err := i.querySchema(ctx, query, s) + if err != nil { + return fmt.Errorf("mysql: querying %q check constraints: %w", s.Name, err) + } + defer rows.Close() + for rows.Next() { + var table, name, clause, enforced sql.NullString + if err := rows.Scan(&table, &name, &clause, &enforced); err != nil { + return fmt.Errorf("mysql: %w", err) + } + t, ok := s.Table(table.String) + if !ok { + return fmt.Errorf("table %q was not found in schema", table.String) + } + check := &schema.Check{ + Name: name.String, + Expr: unescape(clause.String), + } + if i.Maria() { + check.Expr = clause.String + // In MariaDB, JSON is an alias to LONGTEXT. For versions >= 10.4.3, the CHARSET and COLLATE set to utf8mb4 + // and a CHECK constraint is automatically created for the column as well (i.e. JSON_VALID(``)). However, + // we expect tools like Atlas and Ent to manually add this CHECK for older versions of MariaDB. + c, ok := t.Column(check.Name) + if ok && c.Type.Raw == TypeLongText && check.Expr == fmt.Sprintf("json_valid(`%s`)", c.Name) { + c.Type.Raw = TypeJSON + c.Type.Type = &schema.JSONType{T: TypeJSON} + // Unset the inspected CHARSET/COLLATE attributes + // as they are valid only for character types. + c.UnsetCharset().UnsetCollation() + } + } else if enforced.String == "NO" { + // The ENFORCED attribute is not supported by MariaDB. + // Also, skip adding it in case the CHECK is ENFORCED, + // as the default is ENFORCED if not state otherwise. 
+			check.Attrs = append(check.Attrs, &Enforced{V: false})
+		}
+		t.Attrs = append(t.Attrs, check)
+	}
+	return rows.Err()
+}
+
+// supportsCheck reports if the connected database supports
+// the CHECK clause, and returns the query for retrieving them.
+func (i *inspect) supportsCheck() (string, bool) {
+	q := myChecksQuery
+	if i.Maria() {
+		q = marChecksQuery
+	}
+	return q, i.SupportsCheck()
+}
+
+// indexQuery returns the query to retrieve the indexes of the given table.
+func (i *inspect) indexQuery() string {
+	query := indexesNoCommentQuery
+	if i.SupportsIndexComment() {
+		query = indexesQuery
+	}
+	if i.SupportsIndexExpr() {
+		query = indexesExprQuery
+	}
+	return query
+}
+
+// extraAttr is a parsed version of the information_schema EXTRA column.
+type extraAttr struct {
+	autoinc          bool
+	onUpdate         string
+	generatedType    string
+	defaultGenerated bool
+}
+
+var (
+	reGenerateType = regexp.MustCompile(`(?i)^(stored|persistent|virtual) generated$`)
+	reTimeOnUpdate = regexp.MustCompile(`(?i)^(?:default_generated )?on update (current_timestamp(?:\(\d?\))?)$`)
+)
+
+// parseExtra returns a parsed version of the EXTRA column
+// from the INFORMATION_SCHEMA.COLUMNS table.
+func parseExtra(extra string) (*extraAttr, error) {
+	attr := &extraAttr{}
+	switch el := strings.ToLower(extra); {
+	case el == "", el == "null":
+	case el == defaultGen:
+		attr.defaultGenerated = true
+		// The column has an expression default value,
+		// and it is handled in Driver.addColumn.
+	case el == autoIncrement:
+		attr.autoinc = true
+	case reTimeOnUpdate.MatchString(extra):
+		attr.onUpdate = reTimeOnUpdate.FindStringSubmatch(extra)[1]
+	case reGenerateType.MatchString(extra):
+		attr.generatedType = reGenerateType.FindStringSubmatch(extra)[1]
+	default:
+		return nil, fmt.Errorf("unknown extra column attribute %q", extra)
+	}
+	return attr, nil
+}
+
+// showCreate sets and fixes schema elements that require information from
+// the 'SHOW CREATE' command.
+func (i *inspect) showCreate(ctx context.Context, s *schema.Schema) error {
+	for _, t := range s.Tables {
+		st, ok := popShow(t)
+		if !ok {
+			continue
+		}
+		if err := i.createStmt(ctx, t); err != nil {
+			return err
+		}
+		if err := i.setAutoInc(st, t); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var reAutoinc = regexp.MustCompile(`(?i)\s*AUTO_INCREMENT\s*=\s*(\d+)\s*`)
+
+// setAutoInc extracts the updated AUTO_INCREMENT from CREATE TABLE.
+func (i *inspect) setAutoInc(s *showTable, t *schema.Table) error {
+	if s.auto == nil {
+		return nil
+	}
+	var c CreateStmt
+	if !sqlx.Has(t.Attrs, &c) {
+		return fmt.Errorf("missing CREATE TABLE statement in attributes for %q", t.Name)
+	}
+	if sqlx.Has(t.Attrs, &AutoIncrement{}) {
+		return fmt.Errorf("unexpected AUTO_INCREMENT attributes for table: %q", t.Name)
+	}
+	matches := reAutoinc.FindStringSubmatch(c.S)
+	if len(matches) != 2 {
+		return nil
+	}
+	v, err := strconv.ParseInt(matches[1], 10, 64)
+	if err != nil {
+		return err
+	}
+	s.auto.V = v
+	t.Attrs = append(t.Attrs, s.auto)
+	return nil
+}
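+
+// Some EXTRA values accepted by parseExtra above (illustrative, not part of
+// the vendored code):
+//
+//	"auto_increment"                                   => autoinc = true
+//	"DEFAULT_GENERATED on update CURRENT_TIMESTAMP(6)" => onUpdate = "CURRENT_TIMESTAMP(6)"
+//	"STORED GENERATED"                                 => generatedType = "STORED"
+
+// createStmt loads the CREATE TABLE statement for the table.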
+func (i *inspect) createStmt(ctx context.Context, t *schema.Table) error { + c := &CreateStmt{} + b := &sqlx.Builder{QuoteChar: '`'} + rows, err := i.QueryContext(ctx, b.P("SHOW CREATE TABLE").Table(t).String()) + if err != nil { + return fmt.Errorf("query CREATE TABLE %q: %w", t.Name, err) + } + if err := sqlx.ScanOne(rows, &sql.NullString{}, &c.S); err != nil { + return fmt.Errorf("scan CREATE TABLE %q: %w", t.Name, err) + } + t.Attrs = append(t.Attrs, c) + return nil +} + +var reCurrTimestamp = regexp.MustCompile(`(?i)^current_timestamp(?:\(\d?\))?$`) + +// myDefaultExpr returns the correct schema.Expr based on the column attributes for MySQL. +func (i *inspect) myDefaultExpr(c *schema.Column, x string, attr *extraAttr) schema.Expr { + // In MySQL, the DEFAULT_GENERATED indicates the column has an expression default value. + if i.SupportsExprDefault() && attr.defaultGenerated { + // Skip CURRENT_TIMESTAMP, because wrapping it with parens will translate it to now(). + if _, ok := c.Type.Type.(*schema.TimeType); ok && reCurrTimestamp.MatchString(x) { + return &schema.RawExpr{X: x} + } + return &schema.RawExpr{X: sqlx.MayWrap(unescape(x))} + } + switch c.Type.Type.(type) { + case *schema.BinaryType: + // MySQL v8 uses Hexadecimal representation. + if isHex(x) { + return &schema.Literal{V: x} + } + case *BitType, *schema.BoolType, *schema.IntegerType, *schema.DecimalType, *schema.FloatType: + return &schema.Literal{V: x} + case *schema.TimeType: + // "current_timestamp" is exceptional in old versions + // of MySQL for timestamp and datetime data types. + if reCurrTimestamp.MatchString(x) { + return &schema.RawExpr{X: x} + } + } + return &schema.Literal{V: quote(x)} +} + +// parseColumn returns column parts, size and signed-info from a MySQL type. +func parseColumn(typ string) (parts []string, size int, unsigned bool, err error) { + switch parts = strings.FieldsFunc(typ, func(r rune) bool { + return r == '(' || r == ')' || r == ' ' || r == ',' + }); parts[0] { + case TypeTinyInt, TypeSmallInt, TypeMediumInt, TypeInt, TypeBigInt, + TypeDecimal, TypeNumeric, TypeFloat, TypeDouble, TypeReal: + if attr := parts[len(parts)-1]; attr == "unsigned" || attr == "zerofill" { + unsigned = true + } + if len(parts) > 2 || len(parts) == 2 && !unsigned { + size, err = strconv.Atoi(parts[1]) + } + case TypeBit, TypeBinary, TypeVarBinary, TypeChar, TypeVarchar: + if len(parts) > 1 { + size, err = strconv.Atoi(parts[1]) + } + } + if err != nil { + return nil, 0, false, fmt.Errorf("parse %q to int: %w", parts[1], err) + } + return parts, size, unsigned, nil +} + +// hasNumericDefault reports if the given type has a numeric default value. +func hasNumericDefault(t schema.Type) bool { + switch t.(type) { + case *BitType, *schema.BoolType, *schema.IntegerType, *schema.DecimalType, *schema.FloatType: + return true + } + return false +} + +func isHex(x string) bool { return len(x) > 2 && strings.ToLower(x[:2]) == "0x" } + +// marDefaultExpr returns the correct schema.Expr based on the column attributes for MariaDB. +func (i *inspect) marDefaultExpr(c *schema.Column, x string) schema.Expr { + // Unlike MySQL, NULL means default to NULL or no default. + if x == "NULL" { + return nil + } + // From MariaDB 10.2.7, string-based literals are quoted to distinguish them from expressions. + if i.GTE("10.2.7") && sqlx.IsQuoted(x, '\'') { + return &schema.Literal{V: x} + } + // In this case, we need to manually check if the expression is literal, or fallback to raw expression. 
+ switch c.Type.Type.(type) { + case *BitType: + // Bit literal values. See https://mariadb.com/kb/en/binary-literals. + if strings.HasPrefix(x, "b'") && strings.HasSuffix(x, "'") { + return &schema.Literal{V: x} + } + case *schema.BoolType, *schema.IntegerType, *schema.DecimalType, *schema.FloatType: + if _, err := strconv.ParseFloat(x, 64); err == nil { + return &schema.Literal{V: x} + } + case *schema.TimeType: + // "current_timestamp" is exceptional in old versions + // of MySQL (i.e. MariaDB in this case). + if strings.ToLower(x) == currentTS { + return &schema.RawExpr{X: x} + } + } + if !i.SupportsExprDefault() { + return &schema.Literal{V: quote(x)} + } + return &schema.RawExpr{X: sqlx.MayWrap(x)} +} + +func (i *inspect) querySchema(ctx context.Context, query string, s *schema.Schema) (*sql.Rows, error) { + args := []any{s.Name} + for _, t := range s.Tables { + args = append(args, t.Name) + } + return i.QueryContext(ctx, fmt.Sprintf(query, nArgs(len(s.Tables))), args...) +} + +func nArgs(n int) string { return strings.Repeat("?, ", n-1) + "?" } + +const ( + // Query to list system variables. + variablesQuery = "SELECT @@version, @@collation_server, @@character_set_server, @@lower_case_table_names" + + // Query to list database schemas. + schemasQuery = "SELECT `SCHEMA_NAME`, `DEFAULT_CHARACTER_SET_NAME`, `DEFAULT_COLLATION_NAME` from `INFORMATION_SCHEMA`.`SCHEMATA` WHERE `SCHEMA_NAME` NOT IN ('information_schema','innodb','mysql','performance_schema','sys') ORDER BY `SCHEMA_NAME`" + + // Query to list specific database schemas. + schemasQueryArgs = "SELECT `SCHEMA_NAME`, `DEFAULT_CHARACTER_SET_NAME`, `DEFAULT_COLLATION_NAME` from `INFORMATION_SCHEMA`.`SCHEMATA` WHERE `SCHEMA_NAME` %s ORDER BY `SCHEMA_NAME`" + + // Query to list table columns. + columnsQuery = "SELECT `TABLE_NAME`, `COLUMN_NAME`, `COLUMN_TYPE`, `COLUMN_COMMENT`, `IS_NULLABLE`, `COLUMN_KEY`, `COLUMN_DEFAULT`, `EXTRA`, `CHARACTER_SET_NAME`, `COLLATION_NAME`, NULL AS `GENERATION_EXPRESSION` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` IN (%s) ORDER BY `ORDINAL_POSITION`" + columnsExprQuery = "SELECT `TABLE_NAME`, `COLUMN_NAME`, `COLUMN_TYPE`, `COLUMN_COMMENT`, `IS_NULLABLE`, `COLUMN_KEY`, `COLUMN_DEFAULT`, `EXTRA`, `CHARACTER_SET_NAME`, `COLLATION_NAME`, `GENERATION_EXPRESSION` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` IN (%s) ORDER BY `ORDINAL_POSITION`" + + // Query to list table indexes. + indexesQuery = "SELECT `TABLE_NAME`, `INDEX_NAME`, `COLUMN_NAME`, `NON_UNIQUE`, `SEQ_IN_INDEX`, `INDEX_TYPE`, UPPER(`COLLATION`) = 'D' AS `DESC`, `INDEX_COMMENT`, `SUB_PART`, NULL AS `EXPRESSION` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` IN (%s) ORDER BY `index_name`, `seq_in_index`" + indexesExprQuery = "SELECT `TABLE_NAME`, `INDEX_NAME`, `COLUMN_NAME`, `NON_UNIQUE`, `SEQ_IN_INDEX`, `INDEX_TYPE`, UPPER(`COLLATION`) = 'D' AS `DESC`, `INDEX_COMMENT`, `SUB_PART`, `EXPRESSION` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` IN (%s) ORDER BY `index_name`, `seq_in_index`" + indexesNoCommentQuery = "SELECT `TABLE_NAME`, `INDEX_NAME`, `COLUMN_NAME`, `NON_UNIQUE`, `SEQ_IN_INDEX`, `INDEX_TYPE`, UPPER(`COLLATION`) = 'D' AS `DESC`, NULL AS `INDEX_COMMENT`, `SUB_PART`, NULL AS `EXPRESSION` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? 
AND `TABLE_NAME` IN (%s) ORDER BY `index_name`, `seq_in_index`"
+
+	tablesQuery = `
+SELECT
+	t1.TABLE_SCHEMA,
+	t1.TABLE_NAME,
+	t2.CHARACTER_SET_NAME,
+	t1.TABLE_COLLATION,
+	t1.AUTO_INCREMENT,
+	t1.TABLE_COMMENT,
+	t1.CREATE_OPTIONS
+FROM
+	INFORMATION_SCHEMA.TABLES AS t1
+	LEFT JOIN INFORMATION_SCHEMA.COLLATIONS AS t2
+	ON t1.TABLE_COLLATION = t2.COLLATION_NAME
+WHERE
+	TABLE_SCHEMA IN (%s)
+	AND TABLE_TYPE = 'BASE TABLE'
+ORDER BY
+	TABLE_SCHEMA, TABLE_NAME
+`
+
+	tablesQueryArgs = `
+SELECT
+	t1.TABLE_SCHEMA,
+	t1.TABLE_NAME,
+	t2.CHARACTER_SET_NAME,
+	t1.TABLE_COLLATION,
+	t1.AUTO_INCREMENT,
+	t1.TABLE_COMMENT,
+	t1.CREATE_OPTIONS
+FROM
+	INFORMATION_SCHEMA.TABLES AS t1
+	JOIN INFORMATION_SCHEMA.COLLATIONS AS t2
+	ON t1.TABLE_COLLATION = t2.COLLATION_NAME
+WHERE
+	TABLE_SCHEMA IN (%s)
+	AND TABLE_NAME IN (%s)
+	AND TABLE_TYPE = 'BASE TABLE'
+ORDER BY
+	TABLE_SCHEMA, TABLE_NAME
+`
+
+	// Query to list table check constraints.
+	myChecksQuery  = `SELECT t1.TABLE_NAME, t1.CONSTRAINT_NAME, t2.CHECK_CLAUSE, t1.ENFORCED` + checksQuery
+	marChecksQuery = `SELECT t1.TABLE_NAME, t1.CONSTRAINT_NAME, t2.CHECK_CLAUSE, "YES" AS ENFORCED` + checksQuery
+	checksQuery    = `
+FROM
+	INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS t1
+	JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS AS t2
+	ON t1.CONSTRAINT_NAME = t2.CONSTRAINT_NAME
+	AND t1.CONSTRAINT_SCHEMA = t2.CONSTRAINT_SCHEMA
+WHERE
+	t1.CONSTRAINT_TYPE = 'CHECK'
+	AND t1.TABLE_SCHEMA = ?
+	AND t1.TABLE_NAME IN (%s)
+ORDER BY
+	t1.CONSTRAINT_NAME
+`
+
+	// Query to list table foreign keys.
+	fksQuery = `
+SELECT
+	t1.CONSTRAINT_NAME,
+	t1.TABLE_NAME,
+	t1.COLUMN_NAME,
+	t1.TABLE_SCHEMA,
+	t1.REFERENCED_TABLE_NAME,
+	t1.REFERENCED_COLUMN_NAME,
+	t1.REFERENCED_TABLE_SCHEMA,
+	t3.UPDATE_RULE,
+	t3.DELETE_RULE
+FROM
+	INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS t1
+	JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS t2
+	JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS t3
+	ON t1.CONSTRAINT_NAME = t2.CONSTRAINT_NAME
+	AND t1.CONSTRAINT_NAME = t3.CONSTRAINT_NAME
+	AND t1.TABLE_SCHEMA = t2.TABLE_SCHEMA
+	AND t1.TABLE_SCHEMA = t3.CONSTRAINT_SCHEMA
+WHERE
+	t2.CONSTRAINT_TYPE = 'FOREIGN KEY'
+	AND t1.TABLE_SCHEMA = ?
+	AND t1.TABLE_NAME IN (%s)
+ORDER BY
+	t1.CONSTRAINT_NAME,
+	t1.ORDINAL_POSITION`
+)
+
+type (
+	// AutoIncrement attribute for columns with "AUTO_INCREMENT" as a default.
+	// V represents an optional start value for the counter.
+	AutoIncrement struct {
+		schema.Attr
+		V int64
+	}
+
+	// CreateOptions attribute for describing extra options used with CREATE TABLE.
+	CreateOptions struct {
+		schema.Attr
+		V string
+	}
+
+	// CreateStmt describes the SQL statement used to create a table.
+	CreateStmt struct {
+		schema.Attr
+		S string
+	}
+
+	// OnUpdate attribute for columns with "ON UPDATE CURRENT_TIMESTAMP" as a default.
+	OnUpdate struct {
+		schema.Attr
+		A string
+	}
+
+	// SubPart attribute defines an optional index prefix length for columns.
+	SubPart struct {
+		schema.Attr
+		Len int
+	}
+
+	// Enforced attribute defines the ENFORCED flag for CHECK constraint.
+	Enforced struct {
+		schema.Attr
+		V bool // V indicates if the CHECK is enforced or not.
+	}
+
+	// The DisplayWidth represents a display width of an integer type.
+	DisplayWidth struct {
+		schema.Attr
+		N int
+	}
+
+	// The ZeroFill represents the ZEROFILL attribute which is
+	// deprecated for MySQL version >= 8.0.17.
+	ZeroFill struct {
+		schema.Attr
+		A string
+	}
+
+	// IndexType represents an index type.
+	IndexType struct {
+		schema.Attr
+		T string // BTREE, HASH, FULLTEXT, SPATIAL, RTREE
+	}
+
+	// BitType represents the type bit.
+	BitType struct {
+		schema.Type
+		T    string
+		Size int
+	}
+
+	// SetType represents a set type.
+	SetType struct {
+		schema.Type
+		Values []string
+	}
+
+	// showTable is an intermediate table attribute used
+	// on inspection to indicate if the 'SHOW CREATE TABLE'
+	// command is required and for what.
+	showTable struct {
+		schema.Attr
+		// AUTO_INCREMENT value, due to a missing value in information_schema.
+		auto *AutoIncrement
+	}
+)
+
+func putShow(t *schema.Table) *showTable {
+	for i := range t.Attrs {
+		if s, ok := t.Attrs[i].(*showTable); ok {
+			return s
+		}
+	}
+	s := &showTable{}
+	t.Attrs = append(t.Attrs, s)
+	return s
+}
+
+func popShow(t *schema.Table) (*showTable, bool) {
+	for i := range t.Attrs {
+		if s, ok := t.Attrs[i].(*showTable); ok {
+			t.Attrs = append(t.Attrs[:i], t.Attrs[i+1:]...)
+			return s, true
+		}
+	}
+	return nil, false
+}
diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/BUILD b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/BUILD
new file mode 100644
index 00000000..930d0ebf
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/BUILD
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "mysqlversion",
+    srcs = ["mysqlversion.go"],
+    embedsrcs = [
+        "is/.README.md",
+        "is/charset2collate",
+        "is/charset2collate.maria",
+        "is/collate2charset",
+        "is/collate2charset.maria",
+    ],
+    importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion",
+    importpath = "ariga.io/atlas/sql/mysql/internal/mysqlversion",
+    visibility = [
+        "//third_party:__subpackages__",
+        "//vendor/ariga.io/atlas/sql/mysql:__subpackages__",
+    ],
+    deps = ["//vendor/golang.org/x/mod/semver"],
+)
diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/.README.md b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/.README.md
new file mode 100644
index 00000000..cd483b32
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/.README.md
@@ -0,0 +1,14 @@
+## Charset and Collation of the latest MySQL and MariaDB versions
+
+`collate2charset` and `collate2charset.maria` hold a mapping from each collation to its charset, extracted by the following query:
+ +```sql +select json_objectagg(collation_name, character_set_name) from information_schema.collations\G; +``` + +`charset2collate` and `charset2collate.maria` hold a mapping from the charset to its default collation extracted +by the following query: + +```sql +select json_objectagg(character_set_name, default_collate_name) from information_schema.character_sets\G; +``` \ No newline at end of file diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate new file mode 100644 index 00000000..ee694a76 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate @@ -0,0 +1 @@ +{"gbk": "gbk_chinese_ci", "hp8": "hp8_english_ci", "big5": "big5_chinese_ci", "dec8": "dec8_swedish_ci", "sjis": "sjis_japanese_ci", "swe7": "swe7_swedish_ci", "ucs2": "ucs2_general_ci", "ujis": "ujis_japanese_ci", "utf8": "utf8_general_ci", "ascii": "ascii_general_ci", "cp850": "cp850_general_ci", "cp852": "cp852_general_ci", "cp866": "cp866_general_ci", "cp932": "cp932_japanese_ci", "euckr": "euckr_korean_ci", "greek": "greek_general_ci", "koi8r": "koi8r_general_ci", "koi8u": "koi8u_general_ci", "macce": "macce_general_ci", "utf16": "utf16_general_ci", "utf32": "utf32_general_ci", "binary": "binary", "cp1250": "cp1250_general_ci", "cp1251": "cp1251_general_ci", "cp1256": "cp1256_general_ci", "cp1257": "cp1257_general_ci", "gb2312": "gb2312_chinese_ci", "hebrew": "hebrew_general_ci", "latin1": "latin1_swedish_ci", "latin2": "latin2_general_ci", "latin5": "latin5_turkish_ci", "latin7": "latin7_general_ci", "tis620": "tis620_thai_ci", "eucjpms": "eucjpms_japanese_ci", "gb18030": "gb18030_chinese_ci", "geostd8": "geostd8_general_ci", "keybcs2": "keybcs2_general_ci", "utf16le": "utf16le_general_ci", "utf8mb4": "utf8mb4_0900_ai_ci", "armscii8": "armscii8_general_ci", "macroman": "macroman_general_ci"} \ No newline at end of file diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate.maria b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate.maria new file mode 100644 index 00000000..a78a7b48 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/charset2collate.maria @@ -0,0 +1 @@ +{"big5":"big5_chinese_ci", "dec8":"dec8_swedish_ci", "cp850":"cp850_general_ci", "hp8":"hp8_english_ci", "koi8r":"koi8r_general_ci", "latin1":"latin1_swedish_ci", "latin2":"latin2_general_ci", "swe7":"swe7_swedish_ci", "ascii":"ascii_general_ci", "ujis":"ujis_japanese_ci", "sjis":"sjis_japanese_ci", "hebrew":"hebrew_general_ci", "tis620":"tis620_thai_ci", "euckr":"euckr_korean_ci", "koi8u":"koi8u_general_ci", "gb2312":"gb2312_chinese_ci", "greek":"greek_general_ci", "cp1250":"cp1250_general_ci", "gbk":"gbk_chinese_ci", "latin5":"latin5_turkish_ci", "armscii8":"armscii8_general_ci", "utf8mb3":"utf8mb3_general_ci", "ucs2":"ucs2_general_ci", "cp866":"cp866_general_ci", "keybcs2":"keybcs2_general_ci", "macce":"macce_general_ci", "macroman":"macroman_general_ci", "cp852":"cp852_general_ci", "latin7":"latin7_general_ci", "utf8mb4":"utf8mb4_general_ci", "cp1251":"cp1251_general_ci", "utf16":"utf16_general_ci", "utf16le":"utf16le_general_ci", "cp1256":"cp1256_general_ci", "cp1257":"cp1257_general_ci", "utf32":"utf32_general_ci", "binary":"binary", "geostd8":"geostd8_general_ci", "cp932":"cp932_japanese_ci", "eucjpms":"eucjpms_japanese_ci"} \ No newline at end of file diff --git 
a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset new file mode 100644 index 00000000..92698cc5 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset @@ -0,0 +1 @@ +{"binary": "binary", "gbk_bin": "gbk", "hp8_bin": "hp8", "big5_bin": "big5", "dec8_bin": "dec8", "sjis_bin": "sjis", "swe7_bin": "swe7", "ucs2_bin": "ucs2", "ujis_bin": "ujis", "utf8_bin": "utf8", "ascii_bin": "ascii", "cp850_bin": "cp850", "cp852_bin": "cp852", "cp866_bin": "cp866", "cp932_bin": "cp932", "euckr_bin": "euckr", "greek_bin": "greek", "koi8r_bin": "koi8r", "koi8u_bin": "koi8u", "macce_bin": "macce", "utf16_bin": "utf16", "utf32_bin": "utf32", "cp1250_bin": "cp1250", "cp1251_bin": "cp1251", "cp1256_bin": "cp1256", "cp1257_bin": "cp1257", "gb2312_bin": "gb2312", "hebrew_bin": "hebrew", "latin1_bin": "latin1", "latin2_bin": "latin2", "latin5_bin": "latin5", "latin7_bin": "latin7", "tis620_bin": "tis620", "eucjpms_bin": "eucjpms", "gb18030_bin": "gb18030", "geostd8_bin": "geostd8", "keybcs2_bin": "keybcs2", "utf16le_bin": "utf16le", "utf8mb4_bin": "utf8mb4", "armscii8_bin": "armscii8", "macroman_bin": "macroman", "ucs2_czech_ci": "ucs2", "ucs2_roman_ci": "ucs2", "utf8_czech_ci": "utf8", "utf8_roman_ci": "utf8", "gbk_chinese_ci": "gbk", "hp8_english_ci": "hp8", "tis620_thai_ci": "tis620", "ucs2_danish_ci": "ucs2", "ucs2_polish_ci": "ucs2", "ucs2_slovak_ci": "ucs2", "utf16_czech_ci": "utf16", "utf16_roman_ci": "utf16", "utf32_czech_ci": "utf32", "utf32_roman_ci": "utf32", "utf8_danish_ci": "utf8", "utf8_polish_ci": "utf8", "utf8_slovak_ci": "utf8", "big5_chinese_ci": "big5", "cp1250_czech_cs": "cp1250", "dec8_swedish_ci": "dec8", "euckr_korean_ci": "euckr", "latin2_czech_cs": "latin2", "swe7_swedish_ci": "swe7", "ucs2_general_ci": "ucs2", "ucs2_german2_ci": "ucs2", "ucs2_latvian_ci": "ucs2", "ucs2_persian_ci": "ucs2", "ucs2_sinhala_ci": "ucs2", "ucs2_spanish_ci": "ucs2", "ucs2_swedish_ci": "ucs2", "ucs2_turkish_ci": "ucs2", "ucs2_unicode_ci": "ucs2", "utf16_danish_ci": "utf16", "utf16_polish_ci": "utf16", "utf16_slovak_ci": "utf16", "utf32_danish_ci": "utf32", "utf32_polish_ci": "utf32", "utf32_slovak_ci": "utf32", "utf8_general_ci": "utf8", "utf8_german2_ci": "utf8", "utf8_latvian_ci": "utf8", "utf8_persian_ci": "utf8", "utf8_sinhala_ci": "utf8", "utf8_spanish_ci": "utf8", "utf8_swedish_ci": "utf8", "utf8_tolower_ci": "utf8", "utf8_turkish_ci": "utf8", "utf8_unicode_ci": "utf8", "ascii_general_ci": "ascii", "cp1250_polish_ci": "cp1250", "cp850_general_ci": "cp850", "cp852_general_ci": "cp852", "cp866_general_ci": "cp866", "greek_general_ci": "greek", "koi8r_general_ci": "koi8r", "koi8u_general_ci": "koi8u", "latin1_danish_ci": "latin1", "macce_general_ci": "macce", "sjis_japanese_ci": "sjis", "ucs2_croatian_ci": "ucs2", "ucs2_estonian_ci": "ucs2", "ucs2_romanian_ci": "ucs2", "ucs2_spanish2_ci": "ucs2", "ujis_japanese_ci": "ujis", "utf16_general_ci": "utf16", "utf16_german2_ci": "utf16", "utf16_latvian_ci": "utf16", "utf16_persian_ci": "utf16", "utf16_sinhala_ci": "utf16", "utf16_spanish_ci": "utf16", "utf16_swedish_ci": "utf16", "utf16_turkish_ci": "utf16", "utf16_unicode_ci": "utf16", "utf32_general_ci": "utf32", "utf32_german2_ci": "utf32", "utf32_latvian_ci": "utf32", "utf32_persian_ci": "utf32", "utf32_sinhala_ci": "utf32", "utf32_spanish_ci": "utf32", "utf32_swedish_ci": "utf32", "utf32_turkish_ci": "utf32", "utf32_unicode_ci": "utf32", "utf8_croatian_ci": "utf8", 
"utf8_estonian_ci": "utf8", "utf8_romanian_ci": "utf8", "utf8_spanish2_ci": "utf8", "utf8mb4_0900_bin": "utf8mb4", "utf8mb4_czech_ci": "utf8mb4", "utf8mb4_roman_ci": "utf8mb4", "cp1250_general_ci": "cp1250", "cp1251_general_ci": "cp1251", "cp1251_general_cs": "cp1251", "cp1256_general_ci": "cp1256", "cp1257_general_ci": "cp1257", "cp932_japanese_ci": "cp932", "gb2312_chinese_ci": "gb2312", "hebrew_general_ci": "hebrew", "latin1_general_ci": "latin1", "latin1_general_cs": "latin1", "latin1_german1_ci": "latin1", "latin1_german2_ci": "latin1", "latin1_spanish_ci": "latin1", "latin1_swedish_ci": "latin1", "latin2_general_ci": "latin2", "latin5_turkish_ci": "latin5", "latin7_general_ci": "latin7", "latin7_general_cs": "latin7", "ucs2_esperanto_ci": "ucs2", "ucs2_hungarian_ci": "ucs2", "ucs2_icelandic_ci": "ucs2", "ucs2_slovenian_ci": "ucs2", "utf16_croatian_ci": "utf16", "utf16_estonian_ci": "utf16", "utf16_romanian_ci": "utf16", "utf16_spanish2_ci": "utf16", "utf32_croatian_ci": "utf32", "utf32_estonian_ci": "utf32", "utf32_romanian_ci": "utf32", "utf32_spanish2_ci": "utf32", "utf8_esperanto_ci": "utf8", "utf8_hungarian_ci": "utf8", "utf8_icelandic_ci": "utf8", "utf8_slovenian_ci": "utf8", "utf8mb4_danish_ci": "utf8mb4", "utf8mb4_polish_ci": "utf8mb4", "utf8mb4_slovak_ci": "utf8mb4", "cp1250_croatian_ci": "cp1250", "gb18030_chinese_ci": "gb18030", "geostd8_general_ci": "geostd8", "keybcs2_general_ci": "keybcs2", "latin2_croatian_ci": "latin2", "latin7_estonian_cs": "latin7", "ucs2_lithuanian_ci": "ucs2", "ucs2_vietnamese_ci": "ucs2", "utf16_esperanto_ci": "utf16", "utf16_hungarian_ci": "utf16", "utf16_icelandic_ci": "utf16", "utf16_slovenian_ci": "utf16", "utf16le_general_ci": "utf16le", "utf32_esperanto_ci": "utf32", "utf32_hungarian_ci": "utf32", "utf32_icelandic_ci": "utf32", "utf32_slovenian_ci": "utf32", "utf8_lithuanian_ci": "utf8", "utf8_vietnamese_ci": "utf8", "utf8mb4_0900_ai_ci": "utf8mb4", "utf8mb4_0900_as_ci": "utf8mb4", "utf8mb4_0900_as_cs": "utf8mb4", "utf8mb4_general_ci": "utf8mb4", "utf8mb4_german2_ci": "utf8mb4", "utf8mb4_latvian_ci": "utf8mb4", "utf8mb4_persian_ci": "utf8mb4", "utf8mb4_sinhala_ci": "utf8mb4", "utf8mb4_spanish_ci": "utf8mb4", "utf8mb4_swedish_ci": "utf8mb4", "utf8mb4_turkish_ci": "utf8mb4", "utf8mb4_unicode_ci": "utf8mb4", "armscii8_general_ci": "armscii8", "cp1251_bulgarian_ci": "cp1251", "cp1251_ukrainian_ci": "cp1251", "eucjpms_japanese_ci": "eucjpms", "latin2_hungarian_ci": "latin2", "macroman_general_ci": "macroman", "ucs2_unicode_520_ci": "ucs2", "utf16_lithuanian_ci": "utf16", "utf16_vietnamese_ci": "utf16", "utf32_lithuanian_ci": "utf32", "utf32_vietnamese_ci": "utf32", "utf8_unicode_520_ci": "utf8", "utf8mb4_croatian_ci": "utf8mb4", "utf8mb4_estonian_ci": "utf8mb4", "utf8mb4_romanian_ci": "utf8mb4", "utf8mb4_spanish2_ci": "utf8mb4", "cp1257_lithuanian_ci": "cp1257", "utf16_unicode_520_ci": "utf16", "utf32_unicode_520_ci": "utf32", "utf8mb4_esperanto_ci": "utf8mb4", "utf8mb4_hungarian_ci": "utf8mb4", "utf8mb4_icelandic_ci": "utf8mb4", "utf8mb4_slovenian_ci": "utf8mb4", "utf8mb4_cs_0900_ai_ci": "utf8mb4", "utf8mb4_cs_0900_as_cs": "utf8mb4", "utf8mb4_da_0900_ai_ci": "utf8mb4", "utf8mb4_da_0900_as_cs": "utf8mb4", "utf8mb4_eo_0900_ai_ci": "utf8mb4", "utf8mb4_eo_0900_as_cs": "utf8mb4", "utf8mb4_es_0900_ai_ci": "utf8mb4", "utf8mb4_es_0900_as_cs": "utf8mb4", "utf8mb4_et_0900_ai_ci": "utf8mb4", "utf8mb4_et_0900_as_cs": "utf8mb4", "utf8mb4_hr_0900_ai_ci": "utf8mb4", "utf8mb4_hr_0900_as_cs": "utf8mb4", "utf8mb4_hu_0900_ai_ci": "utf8mb4", 
"utf8mb4_hu_0900_as_cs": "utf8mb4", "utf8mb4_is_0900_ai_ci": "utf8mb4", "utf8mb4_is_0900_as_cs": "utf8mb4", "utf8mb4_ja_0900_as_cs": "utf8mb4", "utf8mb4_la_0900_ai_ci": "utf8mb4", "utf8mb4_la_0900_as_cs": "utf8mb4", "utf8mb4_lithuanian_ci": "utf8mb4", "utf8mb4_lt_0900_ai_ci": "utf8mb4", "utf8mb4_lt_0900_as_cs": "utf8mb4", "utf8mb4_lv_0900_ai_ci": "utf8mb4", "utf8mb4_lv_0900_as_cs": "utf8mb4", "utf8mb4_pl_0900_ai_ci": "utf8mb4", "utf8mb4_pl_0900_as_cs": "utf8mb4", "utf8mb4_ro_0900_ai_ci": "utf8mb4", "utf8mb4_ro_0900_as_cs": "utf8mb4", "utf8mb4_ru_0900_ai_ci": "utf8mb4", "utf8mb4_ru_0900_as_cs": "utf8mb4", "utf8mb4_sk_0900_ai_ci": "utf8mb4", "utf8mb4_sk_0900_as_cs": "utf8mb4", "utf8mb4_sl_0900_ai_ci": "utf8mb4", "utf8mb4_sl_0900_as_cs": "utf8mb4", "utf8mb4_sv_0900_ai_ci": "utf8mb4", "utf8mb4_sv_0900_as_cs": "utf8mb4", "utf8mb4_tr_0900_ai_ci": "utf8mb4", "utf8mb4_tr_0900_as_cs": "utf8mb4", "utf8mb4_vi_0900_ai_ci": "utf8mb4", "utf8mb4_vi_0900_as_cs": "utf8mb4", "utf8mb4_vietnamese_ci": "utf8mb4", "utf8mb4_zh_0900_as_cs": "utf8mb4", "gb18030_unicode_520_ci": "gb18030", "utf8mb4_unicode_520_ci": "utf8mb4", "ucs2_general_mysql500_ci": "ucs2", "utf8_general_mysql500_ci": "utf8", "utf8mb4_de_pb_0900_ai_ci": "utf8mb4", "utf8mb4_de_pb_0900_as_cs": "utf8mb4", "utf8mb4_ja_0900_as_cs_ks": "utf8mb4", "utf8mb4_es_trad_0900_ai_ci": "utf8mb4", "utf8mb4_es_trad_0900_as_cs": "utf8mb4"} \ No newline at end of file diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset.maria b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset.maria new file mode 100644 index 00000000..100fe455 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/is/collate2charset.maria @@ -0,0 +1 @@ +{"big5_chinese_ci":"big5", "big5_bin":"big5", "big5_chinese_nopad_ci":"big5", "big5_nopad_bin":"big5", "dec8_swedish_ci":"dec8", "dec8_bin":"dec8", "dec8_swedish_nopad_ci":"dec8", "dec8_nopad_bin":"dec8", "cp850_general_ci":"cp850", "cp850_bin":"cp850", "cp850_general_nopad_ci":"cp850", "cp850_nopad_bin":"cp850", "hp8_english_ci":"hp8", "hp8_bin":"hp8", "hp8_english_nopad_ci":"hp8", "hp8_nopad_bin":"hp8", "koi8r_general_ci":"koi8r", "koi8r_bin":"koi8r", "koi8r_general_nopad_ci":"koi8r", "koi8r_nopad_bin":"koi8r", "latin1_german1_ci":"latin1", "latin1_swedish_ci":"latin1", "latin1_danish_ci":"latin1", "latin1_german2_ci":"latin1", "latin1_bin":"latin1", "latin1_general_ci":"latin1", "latin1_general_cs":"latin1", "latin1_spanish_ci":"latin1", "latin1_swedish_nopad_ci":"latin1", "latin1_nopad_bin":"latin1", "latin2_czech_cs":"latin2", "latin2_general_ci":"latin2", "latin2_hungarian_ci":"latin2", "latin2_croatian_ci":"latin2", "latin2_bin":"latin2", "latin2_general_nopad_ci":"latin2", "latin2_nopad_bin":"latin2", "swe7_swedish_ci":"swe7", "swe7_bin":"swe7", "swe7_swedish_nopad_ci":"swe7", "swe7_nopad_bin":"swe7", "ascii_general_ci":"ascii", "ascii_bin":"ascii", "ascii_general_nopad_ci":"ascii", "ascii_nopad_bin":"ascii", "ujis_japanese_ci":"ujis", "ujis_bin":"ujis", "ujis_japanese_nopad_ci":"ujis", "ujis_nopad_bin":"ujis", "sjis_japanese_ci":"sjis", "sjis_bin":"sjis", "sjis_japanese_nopad_ci":"sjis", "sjis_nopad_bin":"sjis", "hebrew_general_ci":"hebrew", "hebrew_bin":"hebrew", "hebrew_general_nopad_ci":"hebrew", "hebrew_nopad_bin":"hebrew", "tis620_thai_ci":"tis620", "tis620_bin":"tis620", "tis620_thai_nopad_ci":"tis620", "tis620_nopad_bin":"tis620", "euckr_korean_ci":"euckr", "euckr_bin":"euckr", "euckr_korean_nopad_ci":"euckr", "euckr_nopad_bin":"euckr", 
"koi8u_general_ci":"koi8u", "koi8u_bin":"koi8u", "koi8u_general_nopad_ci":"koi8u", "koi8u_nopad_bin":"koi8u", "gb2312_chinese_ci":"gb2312", "gb2312_bin":"gb2312", "gb2312_chinese_nopad_ci":"gb2312", "gb2312_nopad_bin":"gb2312", "greek_general_ci":"greek", "greek_bin":"greek", "greek_general_nopad_ci":"greek", "greek_nopad_bin":"greek", "cp1250_general_ci":"cp1250", "cp1250_czech_cs":"cp1250", "cp1250_croatian_ci":"cp1250", "cp1250_bin":"cp1250", "cp1250_polish_ci":"cp1250", "cp1250_general_nopad_ci":"cp1250", "cp1250_nopad_bin":"cp1250", "gbk_chinese_ci":"gbk", "gbk_bin":"gbk", "gbk_chinese_nopad_ci":"gbk", "gbk_nopad_bin":"gbk", "latin5_turkish_ci":"latin5", "latin5_bin":"latin5", "latin5_turkish_nopad_ci":"latin5", "latin5_nopad_bin":"latin5", "armscii8_general_ci":"armscii8", "armscii8_bin":"armscii8", "armscii8_general_nopad_ci":"armscii8", "armscii8_nopad_bin":"armscii8", "utf8mb3_general_ci":"utf8mb3", "utf8mb3_bin":"utf8mb3", "utf8mb3_unicode_ci":"utf8mb3", "utf8mb3_icelandic_ci":"utf8mb3", "utf8mb3_latvian_ci":"utf8mb3", "utf8mb3_romanian_ci":"utf8mb3", "utf8mb3_slovenian_ci":"utf8mb3", "utf8mb3_polish_ci":"utf8mb3", "utf8mb3_estonian_ci":"utf8mb3", "utf8mb3_spanish_ci":"utf8mb3", "utf8mb3_swedish_ci":"utf8mb3", "utf8mb3_turkish_ci":"utf8mb3", "utf8mb3_czech_ci":"utf8mb3", "utf8mb3_danish_ci":"utf8mb3", "utf8mb3_lithuanian_ci":"utf8mb3", "utf8mb3_slovak_ci":"utf8mb3", "utf8mb3_spanish2_ci":"utf8mb3", "utf8mb3_roman_ci":"utf8mb3", "utf8mb3_persian_ci":"utf8mb3", "utf8mb3_esperanto_ci":"utf8mb3", "utf8mb3_hungarian_ci":"utf8mb3", "utf8mb3_sinhala_ci":"utf8mb3", "utf8mb3_german2_ci":"utf8mb3", "utf8mb3_croatian_mysql561_ci":"utf8mb3", "utf8mb3_unicode_520_ci":"utf8mb3", "utf8mb3_vietnamese_ci":"utf8mb3", "utf8mb3_general_mysql500_ci":"utf8mb3", "utf8mb3_croatian_ci":"utf8mb3", "utf8mb3_myanmar_ci":"utf8mb3", "utf8mb3_thai_520_w2":"utf8mb3", "utf8mb3_general_nopad_ci":"utf8mb3", "utf8mb3_nopad_bin":"utf8mb3", "utf8mb3_unicode_nopad_ci":"utf8mb3", "utf8mb3_unicode_520_nopad_ci":"utf8mb3", "ucs2_general_ci":"ucs2", "ucs2_bin":"ucs2", "ucs2_unicode_ci":"ucs2", "ucs2_icelandic_ci":"ucs2", "ucs2_latvian_ci":"ucs2", "ucs2_romanian_ci":"ucs2", "ucs2_slovenian_ci":"ucs2", "ucs2_polish_ci":"ucs2", "ucs2_estonian_ci":"ucs2", "ucs2_spanish_ci":"ucs2", "ucs2_swedish_ci":"ucs2", "ucs2_turkish_ci":"ucs2", "ucs2_czech_ci":"ucs2", "ucs2_danish_ci":"ucs2", "ucs2_lithuanian_ci":"ucs2", "ucs2_slovak_ci":"ucs2", "ucs2_spanish2_ci":"ucs2", "ucs2_roman_ci":"ucs2", "ucs2_persian_ci":"ucs2", "ucs2_esperanto_ci":"ucs2", "ucs2_hungarian_ci":"ucs2", "ucs2_sinhala_ci":"ucs2", "ucs2_german2_ci":"ucs2", "ucs2_croatian_mysql561_ci":"ucs2", "ucs2_unicode_520_ci":"ucs2", "ucs2_vietnamese_ci":"ucs2", "ucs2_general_mysql500_ci":"ucs2", "ucs2_croatian_ci":"ucs2", "ucs2_myanmar_ci":"ucs2", "ucs2_thai_520_w2":"ucs2", "ucs2_general_nopad_ci":"ucs2", "ucs2_nopad_bin":"ucs2", "ucs2_unicode_nopad_ci":"ucs2", "ucs2_unicode_520_nopad_ci":"ucs2", "cp866_general_ci":"cp866", "cp866_bin":"cp866", "cp866_general_nopad_ci":"cp866", "cp866_nopad_bin":"cp866", "keybcs2_general_ci":"keybcs2", "keybcs2_bin":"keybcs2", "keybcs2_general_nopad_ci":"keybcs2", "keybcs2_nopad_bin":"keybcs2", "macce_general_ci":"macce", "macce_bin":"macce", "macce_general_nopad_ci":"macce", "macce_nopad_bin":"macce", "macroman_general_ci":"macroman", "macroman_bin":"macroman", "macroman_general_nopad_ci":"macroman", "macroman_nopad_bin":"macroman", "cp852_general_ci":"cp852", "cp852_bin":"cp852", "cp852_general_nopad_ci":"cp852", "cp852_nopad_bin":"cp852", 
"latin7_estonian_cs":"latin7", "latin7_general_ci":"latin7", "latin7_general_cs":"latin7", "latin7_bin":"latin7", "latin7_general_nopad_ci":"latin7", "latin7_nopad_bin":"latin7", "utf8mb4_general_ci":"utf8mb4", "utf8mb4_bin":"utf8mb4", "utf8mb4_unicode_ci":"utf8mb4", "utf8mb4_icelandic_ci":"utf8mb4", "utf8mb4_latvian_ci":"utf8mb4", "utf8mb4_romanian_ci":"utf8mb4", "utf8mb4_slovenian_ci":"utf8mb4", "utf8mb4_polish_ci":"utf8mb4", "utf8mb4_estonian_ci":"utf8mb4", "utf8mb4_spanish_ci":"utf8mb4", "utf8mb4_swedish_ci":"utf8mb4", "utf8mb4_turkish_ci":"utf8mb4", "utf8mb4_czech_ci":"utf8mb4", "utf8mb4_danish_ci":"utf8mb4", "utf8mb4_lithuanian_ci":"utf8mb4", "utf8mb4_slovak_ci":"utf8mb4", "utf8mb4_spanish2_ci":"utf8mb4", "utf8mb4_roman_ci":"utf8mb4", "utf8mb4_persian_ci":"utf8mb4", "utf8mb4_esperanto_ci":"utf8mb4", "utf8mb4_hungarian_ci":"utf8mb4", "utf8mb4_sinhala_ci":"utf8mb4", "utf8mb4_german2_ci":"utf8mb4", "utf8mb4_croatian_mysql561_ci":"utf8mb4", "utf8mb4_unicode_520_ci":"utf8mb4", "utf8mb4_vietnamese_ci":"utf8mb4", "utf8mb4_croatian_ci":"utf8mb4", "utf8mb4_myanmar_ci":"utf8mb4", "utf8mb4_thai_520_w2":"utf8mb4", "utf8mb4_general_nopad_ci":"utf8mb4", "utf8mb4_nopad_bin":"utf8mb4", "utf8mb4_unicode_nopad_ci":"utf8mb4", "utf8mb4_unicode_520_nopad_ci":"utf8mb4", "cp1251_bulgarian_ci":"cp1251", "cp1251_ukrainian_ci":"cp1251", "cp1251_bin":"cp1251", "cp1251_general_ci":"cp1251", "cp1251_general_cs":"cp1251", "cp1251_nopad_bin":"cp1251", "cp1251_general_nopad_ci":"cp1251", "utf16_general_ci":"utf16", "utf16_bin":"utf16", "utf16_unicode_ci":"utf16", "utf16_icelandic_ci":"utf16", "utf16_latvian_ci":"utf16", "utf16_romanian_ci":"utf16", "utf16_slovenian_ci":"utf16", "utf16_polish_ci":"utf16", "utf16_estonian_ci":"utf16", "utf16_spanish_ci":"utf16", "utf16_swedish_ci":"utf16", "utf16_turkish_ci":"utf16", "utf16_czech_ci":"utf16", "utf16_danish_ci":"utf16", "utf16_lithuanian_ci":"utf16", "utf16_slovak_ci":"utf16", "utf16_spanish2_ci":"utf16", "utf16_roman_ci":"utf16", "utf16_persian_ci":"utf16", "utf16_esperanto_ci":"utf16", "utf16_hungarian_ci":"utf16", "utf16_sinhala_ci":"utf16", "utf16_german2_ci":"utf16", "utf16_croatian_mysql561_ci":"utf16", "utf16_unicode_520_ci":"utf16", "utf16_vietnamese_ci":"utf16", "utf16_croatian_ci":"utf16", "utf16_myanmar_ci":"utf16", "utf16_thai_520_w2":"utf16", "utf16_general_nopad_ci":"utf16", "utf16_nopad_bin":"utf16", "utf16_unicode_nopad_ci":"utf16", "utf16_unicode_520_nopad_ci":"utf16", "utf16le_general_ci":"utf16le", "utf16le_bin":"utf16le", "utf16le_general_nopad_ci":"utf16le", "utf16le_nopad_bin":"utf16le", "cp1256_general_ci":"cp1256", "cp1256_bin":"cp1256", "cp1256_general_nopad_ci":"cp1256", "cp1256_nopad_bin":"cp1256", "cp1257_lithuanian_ci":"cp1257", "cp1257_bin":"cp1257", "cp1257_general_ci":"cp1257", "cp1257_nopad_bin":"cp1257", "cp1257_general_nopad_ci":"cp1257", "utf32_general_ci":"utf32", "utf32_bin":"utf32", "utf32_unicode_ci":"utf32", "utf32_icelandic_ci":"utf32", "utf32_latvian_ci":"utf32", "utf32_romanian_ci":"utf32", "utf32_slovenian_ci":"utf32", "utf32_polish_ci":"utf32", "utf32_estonian_ci":"utf32", "utf32_spanish_ci":"utf32", "utf32_swedish_ci":"utf32", "utf32_turkish_ci":"utf32", "utf32_czech_ci":"utf32", "utf32_danish_ci":"utf32", "utf32_lithuanian_ci":"utf32", "utf32_slovak_ci":"utf32", "utf32_spanish2_ci":"utf32", "utf32_roman_ci":"utf32", "utf32_persian_ci":"utf32", "utf32_esperanto_ci":"utf32", "utf32_hungarian_ci":"utf32", "utf32_sinhala_ci":"utf32", "utf32_german2_ci":"utf32", "utf32_croatian_mysql561_ci":"utf32", 
"utf32_unicode_520_ci":"utf32", "utf32_vietnamese_ci":"utf32", "utf32_croatian_ci":"utf32", "utf32_myanmar_ci":"utf32", "utf32_thai_520_w2":"utf32", "utf32_general_nopad_ci":"utf32", "utf32_nopad_bin":"utf32", "utf32_unicode_nopad_ci":"utf32", "utf32_unicode_520_nopad_ci":"utf32", "binary":"binary", "geostd8_general_ci":"geostd8", "geostd8_bin":"geostd8", "geostd8_general_nopad_ci":"geostd8", "geostd8_nopad_bin":"geostd8", "cp932_japanese_ci":"cp932", "cp932_bin":"cp932", "cp932_japanese_nopad_ci":"cp932", "cp932_nopad_bin":"cp932", "eucjpms_japanese_ci":"eucjpms", "eucjpms_bin":"eucjpms", "eucjpms_japanese_nopad_ci":"eucjpms", "eucjpms_nopad_bin":"eucjpms"} \ No newline at end of file diff --git a/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/mysqlversion.go b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/mysqlversion.go new file mode 100644 index 00000000..ba23b7be --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/internal/mysqlversion/mysqlversion.go @@ -0,0 +1,150 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package mysqlversion + +import ( + "embed" + "encoding/json" + "fmt" + "strings" + + "golang.org/x/mod/semver" +) + +// V provides information about MySQL versions. +type V string + +// SupportsCheck reports if the version supports the CHECK +// clause, and return the querying for getting them. +func (v V) SupportsCheck() bool { + u := "8.0.16" + if v.Maria() { + u = "10.2.1" + } + return v.GTE(u) +} + +// SupportsIndexExpr reports if the version supports +// index expressions (functional key part). +func (v V) SupportsIndexExpr() bool { + return !v.Maria() && v.GTE("8.0.13") +} + +// SupportsDisplayWidth reports if the version supports getting +// the display width information from the information schema. +func (v V) SupportsDisplayWidth() bool { + // MySQL v8.0.19 dropped the display width + // information from the information schema + return v.Maria() || v.LT("8.0.19") +} + +// SupportsExprDefault reports if the version supports +// expressions in the DEFAULT clause on column definition. +func (v V) SupportsExprDefault() bool { + u := "8.0.13" + if v.Maria() { + u = "10.2.1" + } + return v.GTE(u) +} + +// SupportsEnforceCheck reports if the version supports +// the ENFORCED option in CHECK constraint syntax. +func (v V) SupportsEnforceCheck() bool { + return !v.Maria() && v.GTE("8.0.16") +} + +// SupportsGeneratedColumns reports if the version supports +// the generated columns in information schema. +func (v V) SupportsGeneratedColumns() bool { + u := "5.7" + if v.Maria() { + u = "10.2" + } + return v.GTE(u) +} + +// SupportsRenameColumn reports if the version supports +// the "RENAME COLUMN" clause. +func (v V) SupportsRenameColumn() bool { + u := "8" + if v.Maria() { + u = "10.5.2" + } + return v.GTE(u) +} + +// SupportsIndexComment reports if the version +// supports comments on indexes. +func (v V) SupportsIndexComment() bool { + // According to Oracle release notes, comments on + // indexes were added in version 5.5.3. + return v.Maria() || v.GTE("5.5.3") +} + +// CharsetToCollate returns the mapping from charset to its default collation. +func (v V) CharsetToCollate() (map[string]string, error) { + name := "is/charset2collate" + if v.Maria() { + name += ".maria" + } + return decode(name) +} + +// CollateToCharset returns the mapping from a collation to its charset. 
+func (v V) CollateToCharset() (map[string]string, error) { + name := "is/collate2charset" + if v.Maria() { + name += ".maria" + } + return decode(name) +} + +// Maria reports if the MySQL version is MariaDB. +func (v V) Maria() bool { + return strings.Index(string(v), "MariaDB") > 0 +} + +// TiDB reports if the MySQL version is TiDB. +func (v V) TiDB() bool { + return strings.Index(string(v), "TiDB") > 0 +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +func (v V) Compare(w string) int { + u := string(v) + switch idx := strings.Index(u, "-"); { + case v.Maria(): + u = u[:strings.Index(u, "MariaDB")-1] + case v.TiDB(): + u = u[:strings.Index(u, "TiDB")-1] + case idx > 0: + // Remove server build information, if any. + u = u[:idx] + } + return semver.Compare("v"+u, "v"+w) +} + +// GTE reports if the version is >= w. +func (v V) GTE(w string) bool { return v.Compare(w) >= 0 } + +// LT reports if the version is < w. +func (v V) LT(w string) bool { return v.Compare(w) == -1 } + +//go:embed is/* +var encoding embed.FS + +func decode(name string) (map[string]string, error) { + f, err := encoding.Open(name) + if err != nil { + return nil, err + } + var m map[string]string + if err := json.NewDecoder(f).Decode(&m); err != nil { + return nil, fmt.Errorf("decode %q", name) + } + return m, nil +} diff --git a/vendor/ariga.io/atlas/sql/mysql/migrate.go b/vendor/ariga.io/atlas/sql/mysql/migrate.go new file mode 100644 index 00000000..98d598f1 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/migrate.go @@ -0,0 +1,802 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package mysql + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" +) + +var ( + noConn = conn{ExecQuerier: sqlx.NoRows, V: "8.0.31"} + // DefaultPlan provides basic planning capabilities for MySQL dialects. + // Note, it is recommended to call Open, create a new Driver and use its + // migrate.PlanApplier when a database connection is available. + DefaultPlan migrate.PlanApplier = &planApply{conn: noConn} +) + +// A planApply provides migration capabilities for schema elements. +type planApply struct{ conn } + +// PlanChanges returns a migration plan for the given schema changes. +func (p *planApply) PlanChanges(_ context.Context, name string, changes []schema.Change, opts ...migrate.PlanOption) (*migrate.Plan, error) { + s := &state{ + conn: p.conn, + Plan: migrate.Plan{ + Name: name, + // All statements generated by state will cause implicit commit. + // https://dev.mysql.com/doc/refman/8.0/en/implicit-commit.html + Transactional: false, + }, + } + for _, o := range opts { + o(&s.PlanOptions) + } + if err := s.plan(changes); err != nil { + return nil, err + } + if err := sqlx.SetReversible(&s.Plan); err != nil { + return nil, err + } + return &s.Plan, nil +} + +// ApplyChanges applies the changes on the database. An error is returned +// if the driver is unable to produce a plan for them, or if one of the +// statements fails or is unsupported. +func (p *planApply) ApplyChanges(ctx context.Context, changes []schema.Change, opts ...migrate.PlanOption) error { + return sqlx.ApplyChanges(ctx, changes, p, opts...) +} + +// state represents the state of a planning run.
It is not part of +// planApply so that multiple planning/applying can be called +// in parallel. +type state struct { + conn + migrate.Plan + migrate.PlanOptions +} + +// plan builds the migration plan for applying the +// given changes on the attached connection. +func (s *state) plan(changes []schema.Change) error { + if s.SchemaQualifier != nil { + if err := sqlx.CheckChangesScope(changes); err != nil { + return err + } + } + planned, err := s.topLevel(changes) + if err != nil { + return err + } + planned, err = sqlx.DetachCycles(planned) + if err != nil { + return err + } + for _, c := range planned { + switch c := c.(type) { + case *schema.AddTable: + err = s.addTable(c) + case *schema.DropTable: + err = s.dropTable(c) + case *schema.ModifyTable: + err = s.modifyTable(c) + case *schema.RenameTable: + s.renameTable(c) + default: + err = fmt.Errorf("unsupported change %T", c) + } + if err != nil { + return err + } + } + return nil +} + +// topLevel appends first the changes for creating or dropping schemas (top-level schema elements). +func (s *state) topLevel(changes []schema.Change) ([]schema.Change, error) { + planned := make([]schema.Change, 0, len(changes)) + for _, c := range changes { + switch c := c.(type) { + case *schema.AddSchema: + b := s.Build("CREATE DATABASE") + if sqlx.Has(c.Extra, &schema.IfNotExists{}) { + b.P("IF NOT EXISTS") + } + b.Ident(c.S.Name) + // Schema was created with CHARSET, and it is not the default database character set. + if a := (schema.Charset{}); sqlx.Has(c.S.Attrs, &a) && a.V != "" && a.V != s.charset { + b.P("CHARSET", a.V) + } + // Schema was created with COLLATE, and it is not the default database collation. + if a := (schema.Collation{}); sqlx.Has(c.S.Attrs, &a) && a.V != "" && a.V != s.collate { + b.P("COLLATE", a.V) + } + s.append(&migrate.Change{ + Cmd: b.String(), + Source: c, + Reverse: s.Build("DROP DATABASE").Ident(c.S.Name).String(), + Comment: fmt.Sprintf("add new schema named %q", c.S.Name), + }) + case *schema.DropSchema: + b := s.Build("DROP DATABASE") + if sqlx.Has(c.Extra, &schema.IfExists{}) { + b.P("IF EXISTS") + } + b.Ident(c.S.Name) + s.append(&migrate.Change{ + Cmd: b.String(), + Source: c, + Comment: fmt.Sprintf("drop schema named %q", c.S.Name), + }) + case *schema.ModifySchema: + if err := s.modifySchema(c); err != nil { + return nil, err + } + default: + planned = append(planned, c) + } + } + return planned, nil +} + +// modifySchema builds and appends the migrate.Changes for bringing +// the schema into its modified state. +func (s *state) modifySchema(modify *schema.ModifySchema) error { + b, r := s.Build(), s.Build() + for _, change := range modify.Changes { + switch change := change.(type) { + // Add schema attributes to an existing schema only if + // it is different from the default server configuration. 
+ case *schema.AddAttr: + switch a := change.A.(type) { + case *schema.Charset: + if a.V != "" && a.V != s.charset { + b.P("CHARSET", a.V) + r.P("CHARSET", s.charset) + } + case *schema.Collation: + if a.V != "" && a.V != s.collate { + b.P("COLLATE", a.V) + r.P("COLLATE", s.collate) + } + default: + return fmt.Errorf("unexpected schema AddAttr: %T", a) + } + case *schema.ModifyAttr: + switch to := change.To.(type) { + case *schema.Charset: + from, ok := change.From.(*schema.Charset) + if !ok { + return fmt.Errorf("mismatch ModifyAttr attributes: %T != %T", change.To, change.From) + } + b.P("CHARSET", to.V) + r.P("CHARSET", from.V) + case *schema.Collation: + from, ok := change.From.(*schema.Collation) + if !ok { + return fmt.Errorf("mismatch ModifyAttr attributes: %T != %T", change.To, change.From) + } + b.P("COLLATE", to.V) + r.P("COLLATE", from.V) + default: + return fmt.Errorf("unexpected schema ModifyAttr: %T", change) + } + default: + return fmt.Errorf("unsupported ModifySchema change %T", change) + } + } + if b.Len() > 0 { + bs := s.Build("ALTER DATABASE").Ident(modify.S.Name) + rs := bs.Clone() + bs.WriteString(b.String()) + rs.WriteString(r.String()) + s.append(&migrate.Change{ + Cmd: bs.String(), + Reverse: rs.String(), + Source: modify, + Comment: fmt.Sprintf("modify %q schema", modify.S.Name), + }) + } + return nil +} + +// addTable builds and appends a migration change +// for creating a table in a schema. +func (s *state) addTable(add *schema.AddTable) error { + var ( + errs []string + b = s.Build("CREATE TABLE") + ) + if sqlx.Has(add.Extra, &schema.IfNotExists{}) { + b.P("IF NOT EXISTS") + } + b.Table(add.T) + if len(add.T.Columns) == 0 { + return fmt.Errorf("table %q has no columns", add.T.Name) + } + b.WrapIndent(func(b *sqlx.Builder) { + b.MapIndent(add.T.Columns, func(i int, b *sqlx.Builder) { + if err := s.column(b, add.T, add.T.Columns[i]); err != nil { + errs = append(errs, err.Error()) + } + }) + if pk := add.T.PrimaryKey; pk != nil { + b.Comma().NL().P("PRIMARY KEY") + indexTypeParts(b, pk) + } + if len(add.T.Indexes) > 0 { + b.Comma() + } + b.MapIndent(add.T.Indexes, func(i int, b *sqlx.Builder) { + idx := add.T.Indexes[i] + index(b, idx) + }) + if len(add.T.ForeignKeys) > 0 { + b.Comma() + if err := s.fks(b.MapIndentErr, add.T.ForeignKeys...); err != nil { + errs = append(errs, err.Error()) + } + } + for _, attr := range add.T.Attrs { + if c, ok := attr.(*schema.Check); ok { + b.Comma().NL() + s.check(b, c) + } + } + }) + if len(errs) > 0 { + return fmt.Errorf("create table %q: %s", add.T.Name, strings.Join(errs, ", ")) + } + s.tableAttr(b, add, add.T.Attrs...) + s.append(&migrate.Change{ + Cmd: b.String(), + Source: add, + Reverse: s.Build("DROP TABLE").Table(add.T).String(), + Comment: fmt.Sprintf("create %q table", add.T.Name), + }) + return nil +} + +// dropTable builds and appends the migrate.Change +// for dropping a table from a schema. 
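Since DefaultPlan (declared at the top of migrate.go) carries a no-rows connection, addTable can be exercised without a database. A minimal sketch; the users table literal is hand-built for illustration:

```go
package main

import (
	"context"
	"fmt"

	"ariga.io/atlas/sql/mysql"
	"ariga.io/atlas/sql/schema"
)

func main() {
	// Hand-built table: a single NOT NULL bigint column, no primary key.
	users := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "id", Type: &schema.ColumnType{Type: &schema.IntegerType{T: "bigint"}}},
		},
	}
	plan, err := mysql.DefaultPlan.PlanChanges(context.Background(), "add_users",
		[]schema.Change{&schema.AddTable{T: users}})
	if err != nil {
		panic(err)
	}
	for _, c := range plan.Changes {
		fmt.Println(c.Cmd)     // e.g. CREATE TABLE `users` (`id` bigint NOT NULL)
		fmt.Println(c.Reverse) // e.g. DROP TABLE `users`
	}
}
```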
+func (s *state) dropTable(drop *schema.DropTable) error { + rs := &state{conn: s.conn, PlanOptions: s.PlanOptions} + if err := rs.addTable(&schema.AddTable{T: drop.T}); err != nil { + return fmt.Errorf("calculate reverse for drop table %q: %w", drop.T.Name, err) + } + b := s.Build("DROP TABLE") + if sqlx.Has(drop.Extra, &schema.IfExists{}) { + b.P("IF EXISTS") + } + b.Table(drop.T) + s.append(&migrate.Change{ + Cmd: b.String(), + Source: drop, + Reverse: rs.Changes[0].Cmd, + Comment: fmt.Sprintf("drop %q table", drop.T.Name), + }) + return nil +} + +// modifyTable builds and appends the migration changes for +// bringing the table into its modified state. +func (s *state) modifyTable(modify *schema.ModifyTable) error { + var changes [2][]schema.Change + if len(modify.T.Columns) == 0 { + return fmt.Errorf("table %q has no columns; drop the table instead", modify.T.Name) + } + for _, change := range skipAutoChanges(modify.Changes) { + switch change := change.(type) { + // Foreign-key modification is translated into 2 steps: + // dropping the current foreign key and creating a new one. + case *schema.ModifyForeignKey: + // DROP and ADD of the same constraint cannot be mixed + // on the ALTER TABLE command. + changes[0] = append(changes[0], &schema.DropForeignKey{ + F: change.From, + }) + // Drop the auto-created index for the reference if the referenced table or column was changed. + if change.Change.Is(schema.ChangeRefTable | schema.ChangeRefColumn) { + changes[0] = append(changes[0], &schema.DropIndex{ + I: &schema.Index{ + Name: change.From.Symbol, + Table: modify.T, + }, + }) + } + changes[1] = append(changes[1], &schema.AddForeignKey{ + F: change.To, + }) + // Index modification requires rebuilding the index. + case *schema.ModifyIndex: + changes[0] = append(changes[0], &schema.DropIndex{ + I: change.From, + }) + changes[1] = append(changes[1], &schema.AddIndex{ + I: change.To, + }) + case *schema.DropAttr: + return fmt.Errorf("unsupported change type: %v", change.A) + default: + changes[1] = append(changes[1], change) + } + } + for i := range changes { + if len(changes[i]) > 0 { + if err := s.alterTable(modify.T, changes[i]); err != nil { + return err + } + } + } + return nil +} + +// alterTable modifies the given table by executing a list of +// changes on it in one SQL statement.
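A companion sketch for modifyTable and alterTable below: a single added column plans into one ALTER statement together with its reverse. The table literal is again hand-built for illustration:

```go
package main

import (
	"context"
	"fmt"

	"ariga.io/atlas/sql/mysql"
	"ariga.io/atlas/sql/schema"
)

func main() {
	users := &schema.Table{
		Name: "users",
		Columns: []*schema.Column{
			{Name: "id", Type: &schema.ColumnType{Type: &schema.IntegerType{T: "bigint"}}},
		},
	}
	age := &schema.Column{Name: "age", Type: &schema.ColumnType{Type: &schema.IntegerType{T: "int"}}}
	plan, err := mysql.DefaultPlan.PlanChanges(context.Background(), "add_age", []schema.Change{
		&schema.ModifyTable{T: users, Changes: []schema.Change{&schema.AddColumn{C: age}}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(plan.Changes[0].Cmd)     // e.g. ALTER TABLE `users` ADD COLUMN `age` int NOT NULL
	fmt.Println(plan.Changes[0].Reverse) // e.g. ALTER TABLE `users` DROP COLUMN `age`
}
```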
+func (s *state) alterTable(t *schema.Table, changes []schema.Change) error { + var ( + reverse []schema.Change + reversible = true + ) + build := func(changes []schema.Change) (string, error) { + b := s.Build("ALTER TABLE").Table(t) + err := b.MapCommaErr(changes, func(i int, b *sqlx.Builder) error { + switch change := changes[i].(type) { + case *schema.AddColumn: + b.P("ADD COLUMN") + if err := s.column(b, t, change.C); err != nil { + return err + } + reverse = append(reverse, &schema.DropColumn{C: change.C}) + case *schema.ModifyColumn: + if err := checkChangeGenerated(change.From, change.To); err != nil { + return err + } + b.P("MODIFY COLUMN") + if err := s.column(b, t, change.To); err != nil { + return err + } + reverse = append(reverse, &schema.ModifyColumn{ + From: change.To, + To: change.From, + Change: change.Change, + }) + case *schema.RenameColumn: + if s.SupportsRenameColumn() { + b.P("RENAME COLUMN").Ident(change.From.Name).P("TO").Ident(change.To.Name) + } else { + b.P("CHANGE COLUMN").Ident(change.From.Name) + if err := s.column(b, t, change.To); err != nil { + return err + } + } + reverse = append(reverse, &schema.RenameColumn{From: change.To, To: change.From}) + case *schema.DropColumn: + b.P("DROP COLUMN").Ident(change.C.Name) + reverse = append(reverse, &schema.AddColumn{C: change.C}) + case *schema.AddIndex: + b.P("ADD") + index(b, change.I) + reverse = append(reverse, &schema.DropIndex{I: change.I}) + case *schema.RenameIndex: + b.P("RENAME INDEX").Ident(change.From.Name).P("TO").Ident(change.To.Name) + reverse = append(reverse, &schema.RenameIndex{From: change.To, To: change.From}) + case *schema.DropIndex: + b.P("DROP INDEX").Ident(change.I.Name) + reverse = append(reverse, &schema.AddIndex{I: change.I}) + case *schema.AddPrimaryKey: + b.P("ADD PRIMARY KEY") + indexTypeParts(b, change.P) + reverse = append(reverse, &schema.DropPrimaryKey{P: change.P}) + case *schema.DropPrimaryKey: + b.P("DROP PRIMARY KEY") + reverse = append(reverse, &schema.AddPrimaryKey{P: change.P}) + case *schema.ModifyPrimaryKey: + b.P("DROP PRIMARY KEY, ADD PRIMARY KEY") + indexTypeParts(b, change.To) + reverse = append(reverse, &schema.ModifyPrimaryKey{From: change.To, To: change.From, Change: change.Change}) + case *schema.AddForeignKey: + b.P("ADD") + if err := s.fks(b.MapCommaErr, change.F); err != nil { + return err + } + reverse = append(reverse, &schema.DropForeignKey{F: change.F}) + case *schema.DropForeignKey: + b.P("DROP FOREIGN KEY").Ident(change.F.Symbol) + reverse = append(reverse, &schema.AddForeignKey{F: change.F}) + case *schema.AddAttr: + s.tableAttr(b, change, change.A) + // Unsupported reverse operation. + reversible = false + case *schema.ModifyAttr: + s.tableAttr(b, change, change.To) + reverse = append(reverse, &schema.ModifyAttr{ + From: change.To, + To: change.From, + }) + case *schema.AddCheck: + s.check(b.P("ADD"), change.C) + // Reverse operation is supported if + // the constraint name is not generated. + if reversible = reversible && change.C.Name != ""; reversible { + reverse = append(reverse, &schema.DropCheck{C: change.C}) + } + case *schema.DropCheck: + b.P("DROP CONSTRAINT").Ident(change.C.Name) + reverse = append(reverse, &schema.AddCheck{C: change.C}) + case *schema.ModifyCheck: + switch { + case change.From.Name == "": + return errors.New("cannot modify unnamed check constraint") + case change.From.Name != change.To.Name: + return fmt.Errorf("mismatch check constraint names: %q != %q", change.From.Name, change.To.Name) + // Enforcement added. 
+ case s.SupportsEnforceCheck() && sqlx.Has(change.From.Attrs, &Enforced{}) && !sqlx.Has(change.To.Attrs, &Enforced{}): + b.P("ALTER CHECK").Ident(change.From.Name).P("ENFORCED") + // Enforcement dropped. + case s.SupportsEnforceCheck() && !sqlx.Has(change.From.Attrs, &Enforced{}) && sqlx.Has(change.To.Attrs, &Enforced{}): + b.P("ALTER CHECK").Ident(change.From.Name).P("NOT ENFORCED") + // Expr was changed. + case change.From.Expr != change.To.Expr: + b.P("DROP CHECK").Ident(change.From.Name).Comma().P("ADD") + s.check(b, change.To) + default: + return errors.New("unknown check constraint change") + } + reverse = append(reverse, &schema.ModifyCheck{ + From: change.To, + To: change.From, + }) + } + return nil + }) + if err != nil { + return "", err + } + return b.String(), nil + } + cmd, err := build(changes) + if err != nil { + return fmt.Errorf("alter table %q: %v", t.Name, err) + } + change := &migrate.Change{ + Cmd: cmd, + Source: &schema.ModifyTable{ + T: t, + Changes: changes, + }, + Comment: fmt.Sprintf("modify %q table", t.Name), + } + if reversible { + // Changes should be reverted in + // the reverse order they were created in. + sqlx.ReverseChanges(reverse) + if change.Reverse, err = build(reverse); err != nil { + return fmt.Errorf("reversed alter table %q: %v", t.Name, err) + } + } + s.append(change) + return nil +} + +func (s *state) renameTable(c *schema.RenameTable) { + s.append(&migrate.Change{ + Source: c, + Comment: fmt.Sprintf("rename a table from %q to %q", c.From.Name, c.To.Name), + Cmd: s.Build("RENAME TABLE").Table(c.From).P("TO").Table(c.To).String(), + Reverse: s.Build("RENAME TABLE").Table(c.To).P("TO").Table(c.From).String(), + }) +} + +func (s *state) column(b *sqlx.Builder, t *schema.Table, c *schema.Column) error { + typ, err := FormatType(c.Type.Type) + if err != nil { + return fmt.Errorf("format type for column %q: %w", c.Name, err) + } + b.Ident(c.Name).P(typ) + if cs := (schema.Charset{}); sqlx.Has(c.Attrs, &cs) { + if !supportsCharset(c.Type.Type) { + return fmt.Errorf("column %q of type %T does not support the CHARSET attribute", c.Name, c.Type.Type) + } + // Define the charset explicitly + // in case it is not the default. + if s.character(t) != cs.V { + b.P("CHARSET", cs.V) + } + } + var ( + x schema.GeneratedExpr + asX = sqlx.Has(c.Attrs, &x) + ) + if asX { + b.P("AS", sqlx.MayWrap(x.Expr), x.Type) + } + // MariaDB does not accept [NOT NULL | NULL] + // as part of the generated columns' syntax. + if !asX || !s.Maria() { + if !c.Type.Null { + b.P("NOT") + } + b.P("NULL") + } + s.columnDefault(b, c) + // Manually add the JSON_VALID constraint for older + // versions (< 10.4.3). See Driver.checks for full info. + if _, ok := c.Type.Type.(*schema.JSONType); ok && s.Maria() && s.LT("10.4.3") && !sqlx.Has(c.Attrs, &schema.Check{}) { + b.P("CHECK").Wrap(func(b *sqlx.Builder) { + b.WriteString(fmt.Sprintf("json_valid(`%s`)", c.Name)) + }) + } + for _, a := range c.Attrs { + switch a := a.(type) { + case *schema.Charset: + // CHARSET is handled above in the "data_type" stage. + case *schema.Collation: + if !supportsCharset(c.Type.Type) { + return fmt.Errorf("column %q of type %T does not support the COLLATE attribute", c.Name, c.Type.Type) + } + // Define the collation explicitly + // in case it is not the default. + if s.collation(t) != a.V { + b.P("COLLATE", a.V) + } + case *OnUpdate: + b.P("ON UPDATE", a.A) + case *AutoIncrement: + b.P("AUTO_INCREMENT") + // Auto increment with a value should be configured on the table options.
+ if a.V > 0 && !sqlx.Has(t.Attrs, &AutoIncrement{}) { + t.Attrs = append(t.Attrs, a) + } + default: + s.attr(b, a) + } + } + return nil +} + +func index(b *sqlx.Builder, idx *schema.Index) { + switch t := indexType(idx.Attrs); { + case idx.Unique: + b.P("UNIQUE") + case t.T == IndexTypeFullText || t.T == IndexTypeSpatial: + b.P(t.T) + } + b.P("INDEX").Ident(idx.Name) + indexTypeParts(b, idx) + if c := (schema.Comment{}); sqlx.Has(idx.Attrs, &c) { + b.P("COMMENT", quote(c.Text)) + } +} + +func indexTypeParts(b *sqlx.Builder, idx *schema.Index) { + // Skip BTREE as it is the default type. + if t := indexType(idx.Attrs); t.T == IndexTypeHash { + b.P("USING", t.T) + } + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(idx.Parts, func(i int, b *sqlx.Builder) { + switch part := idx.Parts[i]; { + case part.C != nil: + b.Ident(part.C.Name) + case part.X != nil: + b.WriteString(sqlx.MayWrap(part.X.(*schema.RawExpr).X)) + } + if s := (&SubPart{}); sqlx.Has(idx.Parts[i].Attrs, s) { + b.WriteString(fmt.Sprintf("(%d)", s.Len)) + } + // Ignore default collation (i.e. "ASC") + if idx.Parts[i].Desc { + b.P("DESC") + } + }) + }) +} + +func (s *state) fks(commaF func(any, func(int, *sqlx.Builder) error) error, fks ...*schema.ForeignKey) error { + return commaF(fks, func(i int, b *sqlx.Builder) error { + fk := fks[i] + if fk.Symbol != "" { + b.P("CONSTRAINT").Ident(fk.Symbol) + } + b.P("FOREIGN KEY") + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(fk.Columns, func(i int, b *sqlx.Builder) { + b.Ident(fk.Columns[i].Name) + }) + }) + b.P("REFERENCES").Table(fk.RefTable) + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(fk.RefColumns, func(i int, b *sqlx.Builder) { + b.Ident(fk.RefColumns[i].Name) + }) + }) + if fk.OnUpdate != "" { + b.P("ON UPDATE", string(fk.OnUpdate)) + } + if fk.OnDelete != "" { + b.P("ON DELETE", string(fk.OnDelete)) + } + if fk.OnUpdate == schema.SetNull || fk.OnDelete == schema.SetNull { + for _, c := range fk.Columns { + if !c.Type.Null { + return fmt.Errorf("foreign key constraint was %[1]q SET NULL, but column %[1]q is NOT NULL", c.Name) + } + } + } + return nil + }) +} + +// tableAttr writes the given table attribute to the SQL +// statement builder when a table is created or altered. +func (s *state) tableAttr(b *sqlx.Builder, c schema.Change, attrs ...schema.Attr) { + for _, a := range attrs { + switch a := a.(type) { + case *CreateOptions: + b.P(a.V) + case *AutoIncrement: + // Update the AUTO_INCREMENT if it is an update change or it is not the default. + if _, ok := c.(*schema.ModifyAttr); ok || a.V > 1 { + b.P("AUTO_INCREMENT", strconv.FormatInt(a.V, 10)) + } + case *schema.Check: + // Ignore CHECK constraints as they are not real attributes, + // and handled on CREATE or ALTER. + case *schema.Charset: + b.P("CHARSET", a.V) + case *schema.Collation: + b.P("COLLATE", a.V) + case *schema.Comment: + b.P("COMMENT", quote(a.Text)) + } + } +} + +// character returns the table character-set from its attributes +// or from the default defined in the schema or the database. +func (s *state) character(t *schema.Table) string { + var c schema.Charset + if sqlx.Has(t.Attrs, &c) || t.Schema != nil && sqlx.Has(t.Schema.Attrs, &c) { + return c.V + } + return s.charset +} + +// collation returns the table collation from its attributes +// or from the default defined in the schema or the database. 
+func (s *state) collation(t *schema.Table) string { + var c schema.Collation + if sqlx.Has(t.Attrs, &c) || t.Schema != nil && sqlx.Has(t.Schema.Attrs, &c) { + return c.V + } + return s.collate +} + +func (s *state) append(c *migrate.Change) { + s.Changes = append(s.Changes, c) +} + +func (*state) attr(b *sqlx.Builder, attrs ...schema.Attr) { + for _, a := range attrs { + switch a := a.(type) { + case *schema.Collation: + b.P("COLLATE", a.V) + case *schema.Comment: + b.P("COMMENT", quote(a.Text)) + } + } +} + +// columnDefault writes the default value of the column to the builder. +func (s *state) columnDefault(b *sqlx.Builder, c *schema.Column) { + switch x := c.Default.(type) { + case *schema.Literal: + v := x.V + if !hasNumericDefault(c.Type.Type) && !isHex(v) { + v = quote(v) + } + b.P("DEFAULT", v) + case *schema.RawExpr: + v := x.X + // For backwards compatibility, quote raw expressions that are not wrapped + // with parens for non-numeric column types (i.e. literals). + switch t := c.Type.Type; { + case isHex(v), hasNumericDefault(t), strings.HasPrefix(v, "(") && strings.HasSuffix(v, ")"): + default: + if _, ok := t.(*schema.TimeType); !ok || !strings.HasPrefix(strings.ToLower(v), currentTS) { + v = quote(v) + } + } + b.P("DEFAULT", v) + } +} + +// Build instantiates a new builder and writes the given phrases to it. +func (s *state) Build(phrases ...string) *sqlx.Builder { + b := &sqlx.Builder{QuoteChar: '`', Schema: s.SchemaQualifier, Indent: s.Indent} + return b.P(phrases...) +} + +// skipAutoChanges filters out unnecessary changes that happen +// automatically in the database when ALTER TABLE is executed. +func skipAutoChanges(changes []schema.Change) []schema.Change { + var ( + dropC = make(map[string]bool) + planned = make([]schema.Change, 0, len(changes)) + ) + for _, c := range changes { + if c, ok := c.(*schema.DropColumn); ok { + dropC[c.C.Name] = true + } + } + for i, c := range changes { + // Simple case: skip dropping a key if all of its columns are dropped anyway. + // https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-add-drop-column + c, ok := c.(*schema.DropIndex) + if !ok { + planned = append(planned, changes[i]) + continue + } + for _, p := range c.I.Parts { + if p.C == nil || !dropC[p.C.Name] { + planned = append(planned, c) + break + } + } + } + return planned +} + +// check writes the CHECK constraint to the builder. +func (s *state) check(b *sqlx.Builder, c *schema.Check) { + if c.Name != "" { + b.P("CONSTRAINT").Ident(c.Name) + } + b.P("CHECK", sqlx.MayWrap(c.Expr)) + if s.SupportsEnforceCheck() && sqlx.Has(c.Attrs, &Enforced{}) { + b.P("ENFORCED") + } +} + +// supportsCharset reports if the given type supports the CHARSET and COLLATE +// clauses. See: https://dev.mysql.com/doc/refman/8.0/en/charset-column.html +func supportsCharset(t schema.Type) bool { + switch t.(type) { + case *schema.StringType, *schema.EnumType, *SetType: + return true + default: + return false + } +} + +// checkChangeGenerated checks if the change of a generated column is valid.
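A sketch of the rule checkChangeGenerated enforces below: converting a plain column into a VIRTUAL generated column is rejected at planning time instead of producing invalid SQL. All literals are hand-built, and schema.ChangeGenerated is assumed to be the change kind marking the transition:

```go
package main

import (
	"context"
	"fmt"

	"ariga.io/atlas/sql/mysql"
	"ariga.io/atlas/sql/schema"
)

func main() {
	from := &schema.Column{Name: "total", Type: &schema.ColumnType{Type: &schema.IntegerType{T: "int"}}}
	to := &schema.Column{
		Name:  "total",
		Type:  &schema.ColumnType{Type: &schema.IntegerType{T: "int"}},
		Attrs: []schema.Attr{&schema.GeneratedExpr{Expr: "price * qty", Type: "VIRTUAL"}},
	}
	items := &schema.Table{Name: "items", Columns: []*schema.Column{from}}
	_, err := mysql.DefaultPlan.PlanChanges(context.Background(), "gen_total", []schema.Change{
		&schema.ModifyTable{T: items, Changes: []schema.Change{
			&schema.ModifyColumn{From: from, To: to, Change: schema.ChangeGenerated},
		}},
	})
	// Expected: alter table "items": changing column "total" to VIRTUAL
	// generated column is not supported (drop and add is required).
	fmt.Println(err)
}
```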
+func checkChangeGenerated(from, to *schema.Column) error { + var fromX, toX schema.GeneratedExpr + switch fromHas, toHas := sqlx.Has(from.Attrs, &fromX), sqlx.Has(to.Attrs, &toX); { + case !fromHas && toHas && storedOrVirtual(toX.Type) == virtual: + return fmt.Errorf("changing column %q to VIRTUAL generated column is not supported (drop and add is required)", from.Name) + case fromHas && !toHas && storedOrVirtual(fromX.Type) == virtual: + return fmt.Errorf("changing VIRTUAL generated column %q to non-generated column is not supported (drop and add is required)", from.Name) + case fromHas && toHas && storedOrVirtual(fromX.Type) != storedOrVirtual(toX.Type): + return fmt.Errorf("changing the store type of generated column %q from %q to %q is not supported", from.Name, storedOrVirtual(fromX.Type), storedOrVirtual(toX.Type)) + } + return nil +} + +func quote(s string) string { + if sqlx.IsQuoted(s, '"', '\'') { + return s + } + return strconv.Quote(s) +} diff --git a/vendor/ariga.io/atlas/sql/mysql/sqlspec.go b/vendor/ariga.io/atlas/sql/mysql/sqlspec.go new file mode 100644 index 00000000..e36cf6ac --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/sqlspec.go @@ -0,0 +1,492 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package mysql + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "ariga.io/atlas/schemahcl" + "ariga.io/atlas/sql/internal/specutil" + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlspec" + + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/zclconf/go-cty/cty" +) + +type doc struct { + Tables []*sqlspec.Table `spec:"table"` + Schemas []*sqlspec.Schema `spec:"schema"` +} + +// evalSpec evaluates an Atlas DDL document into v using the input. +func evalSpec(p *hclparse.Parser, v any, input map[string]cty.Value) error { + switch v := v.(type) { + case *schema.Realm: + var d doc + if err := hclState.Eval(p, &d, input); err != nil { + return err + } + err := specutil.Scan(v, d.Schemas, d.Tables, convertTable) + if err != nil { + return fmt.Errorf("mysql: failed converting to *schema.Realm: %w", err) + } + for _, spec := range d.Schemas { + s, ok := v.Schema(spec.Name) + if !ok { + return fmt.Errorf("could not find schema: %q", spec.Name) + } + if err := convertCharset(spec, &s.Attrs); err != nil { + return err + } + } + case *schema.Schema: + var d doc + if err := hclState.Eval(p, &d, input); err != nil { + return err + } + if len(d.Schemas) != 1 { + return fmt.Errorf("mysql: expecting document to contain a single schema, got %d", len(d.Schemas)) + } + var r schema.Realm + if err := specutil.Scan(&r, d.Schemas, d.Tables, convertTable); err != nil { + return err + } + if err := convertCharset(d.Schemas[0], &r.Schemas[0].Attrs); err != nil { + return err + } + r.Schemas[0].Realm = nil + *v = *r.Schemas[0] + case schema.Schema, schema.Realm: + return fmt.Errorf("mysql: Eval expects a pointer: received %[1]T, expected *%[1]T", v) + default: + return hclState.Eval(p, v, input) + } + return nil +} + +// MarshalSpec marshals v into an Atlas DDL document using a schemahcl.Marshaler. 
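evalSpec and MarshalSpec are exposed through the EvalHCLBytes and MarshalHCL helpers declared below. A round-trip sketch; the HCL document is a minimal hand-written example:

```go
package main

import (
	"fmt"

	"ariga.io/atlas/sql/mysql"
	"ariga.io/atlas/sql/schema"
)

func main() {
	hcl := []byte(`
schema "app" {
  charset = "utf8mb4"
}
table "users" {
  schema = schema.app
  column "id" {
    type           = int
    auto_increment = true
  }
  primary_key {
    columns = [column.id]
  }
}
`)
	// Evaluate the document into a schema graph, then marshal it back.
	var s schema.Schema
	if err := mysql.EvalHCLBytes(hcl, &s, nil); err != nil {
		panic(err)
	}
	out, err := mysql.MarshalHCL(&s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints the normalized HCL document
}
```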
+func MarshalSpec(v any, marshaler schemahcl.Marshaler) ([]byte, error) { + return specutil.Marshal(v, marshaler, schemaSpec) +} + +var ( + hclState = schemahcl.New( + schemahcl.WithTypes("table.column.type", TypeRegistry.Specs()), + schemahcl.WithScopedEnums("table.index.type", IndexTypeBTree, IndexTypeHash, IndexTypeFullText, IndexTypeSpatial), + schemahcl.WithScopedEnums("table.primary_key.type", IndexTypeBTree, IndexTypeHash, IndexTypeFullText, IndexTypeSpatial), + schemahcl.WithScopedEnums("table.column.as.type", stored, persistent, virtual), + schemahcl.WithScopedEnums("table.foreign_key.on_update", specutil.ReferenceVars...), + schemahcl.WithScopedEnums("table.foreign_key.on_delete", specutil.ReferenceVars...), + ) + // MarshalHCL marshals v into an Atlas HCL DDL document. + MarshalHCL = schemahcl.MarshalerFunc(func(v any) ([]byte, error) { + return MarshalSpec(v, hclState) + }) + // EvalHCL implements the schemahcl.Evaluator interface. + EvalHCL = schemahcl.EvalFunc(evalSpec) + + // EvalHCLBytes is a helper that evaluates an HCL document from a byte slice instead + // of from an hclparse.Parser instance. + EvalHCLBytes = specutil.HCLBytesFunc(EvalHCL) +) + +// convertTable converts a sqlspec.Table to a schema.Table. Table conversion is done without converting +// ForeignKeySpecs into ForeignKeys, as the target tables do not necessarily exist in the schema +// at this point. Instead, the linking is done by the convertSchema function. +func convertTable(spec *sqlspec.Table, parent *schema.Schema) (*schema.Table, error) { + t, err := specutil.Table(spec, parent, convertColumn, convertPK, convertIndex, convertCheck) + if err != nil { + return nil, err + } + if err := convertCharset(spec, &t.Attrs); err != nil { + return nil, err + } + // MySQL allows setting the initial AUTO_INCREMENT value + // on the table definition. + if attr, ok := spec.Attr("auto_increment"); ok { + v, err := attr.Int64() + if err != nil { + return nil, err + } + t.AddAttrs(&AutoIncrement{V: v}) + } + return t, err +} + +// convertPK converts a sqlspec.PrimaryKey into a schema.Index. +func convertPK(spec *sqlspec.PrimaryKey, parent *schema.Table) (*schema.Index, error) { + idx, err := specutil.PrimaryKey(spec, parent) + if err != nil { + return nil, err + } + if err := convertIndexType(spec, idx); err != nil { + return nil, err + } + return idx, nil +} + +// convertIndex converts a sqlspec.Index into a schema.Index. +func convertIndex(spec *sqlspec.Index, parent *schema.Table) (*schema.Index, error) { + idx, err := specutil.Index(spec, parent, convertPart) + if err != nil { + return nil, err + } + if err := convertIndexType(spec, idx); err != nil { + return nil, err + } + return idx, nil +} + +func convertIndexType(spec specutil.Attrer, idx *schema.Index) error { + if attr, ok := spec.Attr("type"); ok { + t, err := attr.String() + if err != nil { + return err + } + idx.AddAttrs(&IndexType{T: t}) + } + return nil +} + +func convertPart(spec *sqlspec.IndexPart, part *schema.IndexPart) error { + if attr, ok := spec.Attr("prefix"); ok { + if part.X != nil { + return errors.New("attribute 'on.prefix' cannot be used in functional part") + } + p, err := attr.Int() + if err != nil { + return err + } + part.AddAttrs(&SubPart{Len: p}) + } + return nil +} + +// convertCheck converts a sqlspec.Check into a schema.Check. 
+func convertCheck(spec *sqlspec.Check) (*schema.Check, error) { + c, err := specutil.Check(spec) + if err != nil { + return nil, err + } + if attr, ok := spec.Attr("enforced"); ok { + b, err := attr.Bool() + if err != nil { + return nil, err + } + c.AddAttrs(&Enforced{V: b}) + } + return c, nil +} + +// convertColumn converts a sqlspec.Column into a schema.Column. +func convertColumn(spec *sqlspec.Column, _ *schema.Table) (*schema.Column, error) { + c, err := specutil.Column(spec, convertColumnType) + if err != nil { + return nil, err + } + if err := convertCharset(spec, &c.Attrs); err != nil { + return nil, err + } + if attr, ok := spec.Attr("on_update"); ok { + x, err := attr.RawExpr() + if err != nil { + return nil, fmt.Errorf(`unexpected type %T for attribute "on_update"`, attr.V.Type()) + } + c.AddAttrs(&OnUpdate{A: x.X}) + } + if attr, ok := spec.Attr("auto_increment"); ok { + b, err := attr.Bool() + if err != nil { + return nil, err + } + if b { + c.AddAttrs(&AutoIncrement{}) + } + } + if err := specutil.ConvertGenExpr(spec.Remain(), c, storedOrVirtual); err != nil { + return nil, err + } + return c, err +} + +// convertColumnType converts a sqlspec.Column into a concrete MySQL schema.Type. +func convertColumnType(spec *sqlspec.Column) (schema.Type, error) { + return TypeRegistry.Type(spec.Type, spec.Extra.Attrs) +} + +// schemaSpec converts from a concrete MySQL schema to an Atlas specification. +func schemaSpec(s *schema.Schema) (*sqlspec.Schema, []*sqlspec.Table, error) { + sc, t, err := specutil.FromSchema(s, tableSpec) + if err != nil { + return nil, nil, err + } + if c, ok := hasCharset(s.Attrs, nil); ok { + sc.Extra.Attrs = append(sc.Extra.Attrs, schemahcl.StringAttr("charset", c)) + } + if c, ok := hasCollate(s.Attrs, nil); ok { + sc.Extra.Attrs = append(sc.Extra.Attrs, schemahcl.StringAttr("collate", c)) + } + return sc, t, nil +} + +// tableSpec converts from a concrete MySQL schema.Table to a sqlspec.Table. +func tableSpec(t *schema.Table) (*sqlspec.Table, error) { + ts, err := specutil.FromTable( + t, + columnSpec, + pkSpec, + indexSpec, + specutil.FromForeignKey, + checkSpec, + ) + if err != nil { + return nil, err + } + if c, ok := hasCharset(t.Attrs, t.Schema.Attrs); ok { + ts.Extra.Attrs = append(ts.Extra.Attrs, schemahcl.StringAttr("charset", c)) + } + if c, ok := hasCollate(t.Attrs, t.Schema.Attrs); ok { + ts.Extra.Attrs = append(ts.Extra.Attrs, schemahcl.StringAttr("collate", c)) + } + return ts, nil +} + +func pkSpec(idx *schema.Index) (*sqlspec.PrimaryKey, error) { + spec, err := specutil.FromPrimaryKey(idx) + if err != nil { + return nil, err + } + spec.Extra.Attrs = indexTypeSpec(idx, spec.Extra.Attrs) + return spec, nil +} + +func indexSpec(idx *schema.Index) (*sqlspec.Index, error) { + spec, err := specutil.FromIndex(idx, partAttr) + if err != nil { + return nil, err + } + spec.Extra.Attrs = indexTypeSpec(idx, spec.Extra.Attrs) + return spec, nil +} + +func indexTypeSpec(idx *schema.Index, attrs []*schemahcl.Attr) []*schemahcl.Attr { + // Avoid printing the index type if it is the default.
+ if i := (IndexType{}); sqlx.Has(idx.Attrs, &i) && i.T != IndexTypeBTree { + attrs = append(attrs, specutil.VarAttr("type", strings.ToUpper(i.T))) + } + return attrs +} + +func partAttr(_ *schema.Index, part *schema.IndexPart, spec *sqlspec.IndexPart) error { + if p := (SubPart{}); sqlx.Has(part.Attrs, &p) && p.Len > 0 { + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.IntAttr("prefix", p.Len)) + } + return nil +} + +// columnSpec converts from a concrete MySQL schema.Column into a sqlspec.Column. +func columnSpec(c *schema.Column, t *schema.Table) (*sqlspec.Column, error) { + spec, err := specutil.FromColumn(c, columnTypeSpec) + if err != nil { + return nil, err + } + if c, ok := hasCharset(c.Attrs, t.Attrs); ok { + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.StringAttr("charset", c)) + } + if c, ok := hasCollate(c.Attrs, t.Attrs); ok { + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.StringAttr("collate", c)) + } + if o := (OnUpdate{}); sqlx.Has(c.Attrs, &o) { + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.RawAttr("on_update", o.A)) + } + if sqlx.Has(c.Attrs, &AutoIncrement{}) { + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.BoolAttr("auto_increment", true)) + } + if x := (schema.GeneratedExpr{}); sqlx.Has(c.Attrs, &x) { + spec.Extra.Children = append(spec.Extra.Children, specutil.FromGenExpr(x, storedOrVirtual)) + } + return spec, nil +} + +// storedOrVirtual returns a STORED or VIRTUAL +// generated type option based on the given string. +func storedOrVirtual(s string) string { + switch s = strings.ToUpper(s); s { + // The default is VIRTUAL if no type is specified. + case "": + return virtual + // In MariaDB, PERSISTENT is a synonym for STORED. + case persistent: + return stored + } + return s +} + +// checkSpec converts from a concrete MySQL schema.Check into a sqlspec.Check. +func checkSpec(s *schema.Check) *sqlspec.Check { + c := specutil.FromCheck(s) + if e := (Enforced{}); sqlx.Has(s.Attrs, &e) { + c.Extra.Attrs = append(c.Extra.Attrs, schemahcl.BoolAttr("enforced", true)) + } + return c +} + +// columnTypeSpec converts from a concrete MySQL schema.Type into a sqlspec.Column type. +func columnTypeSpec(t schema.Type) (*sqlspec.Column, error) { + st, err := TypeRegistry.Convert(t) + if err != nil { + return nil, err + } + c := &sqlspec.Column{Type: st} + for _, attr := range st.Attrs { + // TODO(rotemtam): infer this from the TypeSpec + if attr.K == "unsigned" { + c.Extra.Attrs = append(c.Extra.Attrs, attr) + } + } + return c, nil +} + +// convertCharset converts spec charset/collation +// attributes to schema element attributes. +func convertCharset(spec specutil.Attrer, attrs *[]schema.Attr) error { + if attr, ok := spec.Attr("charset"); ok { + s, err := attr.String() + if err != nil { + return err + } + *attrs = append(*attrs, &schema.Charset{V: s}) + } + // For backwards compatibility, accepts both "collate" and "collation". + attr, ok := spec.Attr("collate") + if !ok { + attr, ok = spec.Attr("collation") + } + if ok { + s, err := attr.String() + if err != nil { + return err + } + *attrs = append(*attrs, &schema.Collation{V: s}) + } + return nil +} + +// hasCharset reports if the attribute contains the "charset" attribute, +// and it needs to be defined explicitly on the schema. This is true, in +// case the element charset is different from its parent charset.
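The practical effect of hasCharset and hasCollate is that marshaling prints charset and collate only where they differ from the enclosing element. A sketch with hand-built literals:

```go
package main

import (
	"fmt"

	"ariga.io/atlas/sql/mysql"
	"ariga.io/atlas/sql/schema"
)

func main() {
	posts := &schema.Table{
		Name:  "posts",
		Attrs: []schema.Attr{&schema.Charset{V: "utf8mb4"}},
		Columns: []*schema.Column{
			{
				Name: "title",
				Type: &schema.ColumnType{Type: &schema.StringType{T: "varchar", Size: 255}},
				// Differs from the table charset, so columnSpec prints it.
				Attrs: []schema.Attr{&schema.Charset{V: "latin1"}},
			},
			{
				Name: "body",
				Type: &schema.ColumnType{Type: &schema.StringType{T: "varchar", Size: 1024}},
				// Matches the table charset, so it is omitted from the output.
				Attrs: []schema.Attr{&schema.Charset{V: "utf8mb4"}},
			},
		},
	}
	s := &schema.Schema{Name: "app", Tables: []*schema.Table{posts}}
	posts.Schema = s
	out, err := mysql.MarshalHCL(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```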
+func hasCharset(attr []schema.Attr, parent []schema.Attr) (string, bool) {
+	var c, p schema.Charset
+	if sqlx.Has(attr, &c) && (parent == nil || sqlx.Has(parent, &p) && c.V != p.V) {
+		return c.V, true
+	}
+	return "", false
+}
+
+// hasCollate reports if the attributes contain a "collation"/"collate" attribute
+// that needs to be defined explicitly on the schema. This is true when
+// the element collation differs from its parent collation.
+func hasCollate(attr []schema.Attr, parent []schema.Attr) (string, bool) {
+	var c, p schema.Collation
+	if sqlx.Has(attr, &c) && (parent == nil || sqlx.Has(parent, &p) && c.V != p.V) {
+		return c.V, true
+	}
+	return "", false
+}
+
+// TypeRegistry contains the supported TypeSpecs for the mysql driver.
+var TypeRegistry = schemahcl.NewRegistry(
+	schemahcl.WithFormatter(FormatType),
+	schemahcl.WithParser(ParseType),
+	schemahcl.WithSpecs(
+		&schemahcl.TypeSpec{
+			Name: TypeEnum,
+			T:    TypeEnum,
+			Attributes: []*schemahcl.TypeAttr{
+				{Name: "values", Kind: reflect.Slice, Required: true},
+			},
+			RType: reflect.TypeOf(schema.EnumType{}),
+			FromSpec: func(t *schemahcl.Type) (schema.Type, error) {
+				if len(t.Attrs) != 1 || t.Attrs[0].K != "values" {
+					return nil, fmt.Errorf("invalid enum type spec: %v", t)
+				}
+				v, err := t.Attrs[0].Strings()
+				if err != nil {
+					return nil, err
+				}
+				return &schema.EnumType{T: "enum", Values: v}, nil
+			},
+		},
+		&schemahcl.TypeSpec{
+			Name: TypeSet,
+			T:    TypeSet,
+			Attributes: []*schemahcl.TypeAttr{
+				{Name: "values", Kind: reflect.Slice, Required: true},
+			},
+			RType: reflect.TypeOf(SetType{}),
+			FromSpec: func(t *schemahcl.Type) (schema.Type, error) {
+				if len(t.Attrs) != 1 || t.Attrs[0].K != "values" {
+					return nil, fmt.Errorf("invalid set type spec: %v", t)
+				}
+				v, err := t.Attrs[0].Strings()
+				if err != nil {
+					return nil, err
+				}
+				return &SetType{Values: v}, nil
+			},
+		},
+		schemahcl.NewTypeSpec(TypeBool),
+		schemahcl.NewTypeSpec(TypeBoolean),
+		schemahcl.NewTypeSpec(TypeBit, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeInt, schemahcl.WithAttributes(unsignedTypeAttr(), schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeTinyInt, schemahcl.WithAttributes(unsignedTypeAttr(), schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeSmallInt, schemahcl.WithAttributes(unsignedTypeAttr(), schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeMediumInt, schemahcl.WithAttributes(unsignedTypeAttr(), schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeBigInt, schemahcl.WithAttributes(unsignedTypeAttr(), schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeDecimal, schemahcl.WithAttributes(unsignedTypeAttr(), &schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})),
+		schemahcl.NewTypeSpec(TypeNumeric, schemahcl.WithAttributes(unsignedTypeAttr(), &schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})),
+		schemahcl.NewTypeSpec(TypeFloat, schemahcl.WithAttributes(unsignedTypeAttr(), &schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})),
+		schemahcl.NewTypeSpec(TypeDouble, schemahcl.WithAttributes(unsignedTypeAttr(), &schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required:
false})), + schemahcl.NewTypeSpec(TypeReal, schemahcl.WithAttributes(unsignedTypeAttr(), &schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeTimestamp, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeDate), + schemahcl.NewTypeSpec(TypeTime, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeDateTime, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeYear, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeVarchar, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(true))), + schemahcl.NewTypeSpec(TypeChar, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeVarBinary, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(true))), + schemahcl.NewTypeSpec(TypeBinary, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeBlob, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeTinyBlob), + schemahcl.NewTypeSpec(TypeMediumBlob), + schemahcl.NewTypeSpec(TypeLongBlob), + schemahcl.NewTypeSpec(TypeJSON), + schemahcl.NewTypeSpec(TypeText, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeTinyText), + schemahcl.NewTypeSpec(TypeMediumText), + schemahcl.NewTypeSpec(TypeLongText), + schemahcl.NewTypeSpec(TypeGeometry), + schemahcl.NewTypeSpec(TypePoint), + schemahcl.NewTypeSpec(TypeMultiPoint), + schemahcl.NewTypeSpec(TypeLineString), + schemahcl.NewTypeSpec(TypeMultiLineString), + schemahcl.NewTypeSpec(TypePolygon), + schemahcl.NewTypeSpec(TypeMultiPolygon), + schemahcl.NewTypeSpec(TypeGeometryCollection), + ), +) + +func unsignedTypeAttr() *schemahcl.TypeAttr { + return &schemahcl.TypeAttr{ + Name: "unsigned", + Kind: reflect.Bool, + } +} diff --git a/vendor/ariga.io/atlas/sql/mysql/tidb.go b/vendor/ariga.io/atlas/sql/mysql/tidb.go new file mode 100644 index 00000000..e9ea434c --- /dev/null +++ b/vendor/ariga.io/atlas/sql/mysql/tidb.go @@ -0,0 +1,288 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package mysql + +import ( + "context" + "encoding/binary" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" +) + +type ( + // tplanApply decorates MySQL planApply. + tplanApply struct{ planApply } + // tdiff decorates MySQL diff. + tdiff struct{ diff } + // tinspect decorates MySQL inspect. + tinspect struct{ inspect } +) + +// priority computes the priority of each change. +// +// TiDB does not support multischema ALTERs (i.e. multiple changes in a single ALTER statement). +// Therefore, we have to break down each alter. This function helps order the ALTERs so they work. +// e.g. priority gives precedence to DropForeignKey over DropColumn, because a column cannot be +// dropped if its foreign key was not dropped before. 
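+// For instance (editorial illustration): flattened changes
+// [DropColumn, DropForeignKey, AddColumn] sort to
+// [AddColumn (1), DropForeignKey (2), DropColumn (4)], so the foreign key
+// is dropped before its column.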
+func priority(change schema.Change) int {
+	switch c := change.(type) {
+	case *schema.ModifyTable:
+		// Each ModifyTable should have a single change since we apply `flat` before we sort.
+		return priority(c.Changes[0])
+	case *schema.ModifySchema:
+		// Each ModifySchema should have a single change since we apply `flat` before we sort.
+		return priority(c.Changes[0])
+	case *schema.AddColumn:
+		return 1
+	case *schema.DropIndex, *schema.DropForeignKey, *schema.DropAttr, *schema.DropCheck:
+		return 2
+	case *schema.ModifyIndex, *schema.ModifyForeignKey:
+		return 3
+	default:
+		return 4
+	}
+}
+
+// flat takes a list of changes and breaks them down to single atomic changes (e.g. no ModifyTable
+// with multiple AddColumn inside it). Note that the only "changes" that include sub-changes are
+// `ModifyTable` and `ModifySchema`.
+func flat(changes []schema.Change) []schema.Change {
+	var flat []schema.Change
+	for _, change := range changes {
+		switch m := change.(type) {
+		case *schema.ModifyTable:
+			for _, c := range m.Changes {
+				flat = append(flat, &schema.ModifyTable{
+					T:       m.T,
+					Changes: []schema.Change{c},
+				})
+			}
+		case *schema.ModifySchema:
+			for _, c := range m.Changes {
+				flat = append(flat, &schema.ModifySchema{
+					S:       m.S,
+					Changes: []schema.Change{c},
+				})
+			}
+		default:
+			flat = append(flat, change)
+		}
+	}
+	return flat
+}
+
+// PlanChanges returns a migration plan for the given schema changes.
+func (p *tplanApply) PlanChanges(ctx context.Context, name string, changes []schema.Change, opts ...migrate.PlanOption) (*migrate.Plan, error) {
+	fc := flat(changes)
+	sort.SliceStable(fc, func(i, j int) bool {
+		return priority(fc[i]) < priority(fc[j])
+	})
+	s := &state{
+		conn: p.conn,
+		Plan: migrate.Plan{
+			Name: name,
+			// A plan is reversible if all
+			// its changes are reversible.
+			Reversible:    true,
+			Transactional: false,
+		},
+	}
+	for _, c := range fc {
+		// Use the planner of MySQL with each "atomic" change.
+		plan, err := p.planApply.PlanChanges(ctx, name, []schema.Change{c}, opts...)
+		if err != nil {
+			return nil, err
+		}
+		if !plan.Reversible {
+			s.Plan.Reversible = false
+		}
+		s.Plan.Changes = append(s.Plan.Changes, plan.Changes...)
+	}
+	return &s.Plan, nil
+}
+
+func (p *tplanApply) ApplyChanges(ctx context.Context, changes []schema.Change, opts ...migrate.PlanOption) error {
+	return sqlx.ApplyChanges(ctx, changes, p, opts...)
+}
+
+func (i *tinspect) InspectSchema(ctx context.Context, name string, opts *schema.InspectOptions) (*schema.Schema, error) {
+	s, err := i.inspect.InspectSchema(ctx, name, opts)
+	if err != nil {
+		return nil, err
+	}
+	return i.patchSchema(ctx, s)
+}
+
+func (i *tinspect) InspectRealm(ctx context.Context, opts *schema.InspectRealmOption) (*schema.Realm, error) {
+	r, err := i.inspect.InspectRealm(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range r.Schemas {
+		if _, err := i.patchSchema(ctx, s); err != nil {
+			return nil, err
+		}
+	}
+	return r, nil
+}
+
+func (i *tinspect) patchSchema(ctx context.Context, s *schema.Schema) (*schema.Schema, error) {
+	for _, t := range s.Tables {
+		var createStmt CreateStmt
+		if ok := sqlx.Has(t.Attrs, &createStmt); !ok {
+			if err := i.createStmt(ctx, t); err != nil {
+				return nil, err
+			}
+		}
+		if err := i.setCollate(t); err != nil {
+			return nil, err
+		}
+		if err := i.setFKs(s, t); err != nil {
+			return nil, err
+		}
+		if err := i.setAutoIncrement(t); err != nil {
+			return nil, err
+		}
+		for _, c := range t.Columns {
+			i.patchColumn(ctx, c)
+		}
+	}
+	return s, nil
+}
+
+func (i *tinspect) patchColumn(_ context.Context, c *schema.Column) {
+	_, ok := c.Type.Type.(*BitType)
+	if !ok {
+		return
+	}
+	// TiDB has a bug where it does not format the bit default value correctly.
+	if lit, ok := c.Default.(*schema.Literal); ok && !strings.HasPrefix(lit.V, "b'") {
+		lit.V = bytesToBitLiteral([]byte(lit.V))
+	}
+}
+
+// bytesToBitLiteral converts a byte slice to a MySQL bit literal.
+// e.g. []byte{4} -> b'100', []byte{2,1} -> b'1000000001'.
+// See: https://github.com/pingcap/tidb/issues/32655.
+func bytesToBitLiteral(b []byte) string {
+	bytes := make([]byte, 8)
+	for i := 0; i < len(b); i++ {
+		bytes[8-len(b)+i] = b[i]
+	}
+	val := binary.BigEndian.Uint64(bytes)
+	return fmt.Sprintf("b'%b'", val)
+}
+
+// e.g. CONSTRAINT "" FOREIGN KEY ("foo_id") REFERENCES "foo" ("id").
+var reFK = regexp.MustCompile("(?i)CONSTRAINT\\s+[\"`]*(\\w+)[\"`]*\\s+FOREIGN\\s+KEY\\s*\\(([,\"` \\w]+)\\)\\s+REFERENCES\\s+[\"`]*(\\w+)[\"`]*\\s*\\(([,\"` \\w]+)\\).*")
+var reActions = regexp.MustCompile(fmt.Sprintf("(?i)(ON)\\s+(UPDATE|DELETE)\\s+(%s|%s|%s|%s|%s)", schema.NoAction, schema.Restrict, schema.SetNull, schema.SetDefault, schema.Cascade))
+
+func (i *tinspect) setFKs(s *schema.Schema, t *schema.Table) error {
+	var c CreateStmt
+	if !sqlx.Has(t.Attrs, &c) {
+		return fmt.Errorf("missing CREATE TABLE statement in attributes for %q", t.Name)
+	}
+	for _, m := range reFK.FindAllStringSubmatch(c.S, -1) {
+		if len(m) != 5 {
+			return fmt.Errorf("unexpected number of matches for a table constraint: %q", m)
+		}
+		stmt, ctName, clmns, refTableName, refClmns := m[0], m[1], m[2], m[3], m[4]
+		fk := &schema.ForeignKey{
+			Symbol: ctName,
+			Table:  t,
+		}
+		actions := reActions.FindAllStringSubmatch(stmt, 2)
+		for _, actionMatches := range actions {
+			actionType, actionOp := actionMatches[2], actionMatches[3]
+			switch actionType {
+			case "UPDATE":
+				fk.OnUpdate = schema.ReferenceOption(actionOp)
+			case "DELETE":
+				fk.OnDelete = schema.ReferenceOption(actionOp)
+			default:
+				return fmt.Errorf("action type %s is none of 'UPDATE'/'DELETE'", actionType)
+			}
+		}
+		refTable, ok := s.Table(refTableName)
+		if !ok {
+			return fmt.Errorf("could not resolve referenced table %q", refTableName)
+		}
+		fk.RefTable = refTable
+		for _, c := range columns(s, clmns) {
+			column, ok := t.Column(c)
+			if !ok {
+				return fmt.Errorf("column %q was not found for fk %q", c, ctName)
+			}
+			fk.Columns = append(fk.Columns, column)
+		}
+		for _, c := range columns(s, refClmns) {
+			column, ok := refTable.Column(c)
+			if !ok {
+				return fmt.Errorf("ref column %q was not found for fk %q", c, ctName)
+			}
+			fk.RefColumns = append(fk.RefColumns, column)
+		}
+		t.ForeignKeys = append(t.ForeignKeys, fk)
+	}
+	return nil
+}
+
+// columns returns the column names from the matched regex above.
+func columns(_ *schema.Schema, s string) []string {
+	names := strings.Split(s, ",")
+	for i := range names {
+		names[i] = strings.Trim(strings.TrimSpace(names[i]), "`\"")
+	}
+	return names
+}
+
+// e.g. CHARSET=utf8mb4 COLLATE=utf8mb4_bin
+var reColl = regexp.MustCompile(`(?i)CHARSET\s*=\s*(\w+)\s*COLLATE\s*=\s*(\w+)`)
+
+// setCollate extracts the updated Collation from the CREATE TABLE statement.
+func (i *tinspect) setCollate(t *schema.Table) error {
+	var c CreateStmt
+	if !sqlx.Has(t.Attrs, &c) {
+		return fmt.Errorf("missing CREATE TABLE statement in attributes for %q", t.Name)
+	}
+	matches := reColl.FindStringSubmatch(c.S)
+	if len(matches) != 3 {
+		return fmt.Errorf("missing COLLATE and/or CHARSET information on CREATE TABLE statement for %q", t.Name)
+	}
+	t.SetCharset(matches[1])
+	t.SetCollation(matches[2])
+	return nil
+}
+
+// setAutoIncrement extracts the AUTO_INCREMENT value from the CREATE TABLE statement.
+func (i *tinspect) setAutoIncrement(t *schema.Table) error {
+	// Patch only if it is set (incorrectly set to '1' due to this bug: https://github.com/pingcap/tidb/issues/24702).
+ ai := &AutoIncrement{} + if !sqlx.Has(t.Attrs, ai) { + return nil + } + var c CreateStmt + if !sqlx.Has(t.Attrs, &c) { + return fmt.Errorf("missing CREATE TABLE statement in attributes for %q", t.Name) + } + matches := reAutoinc.FindStringSubmatch(c.S) + if len(matches) != 2 { + return nil + } + v, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return err + } + ai.V = v + schema.ReplaceOrAppend(&t.Attrs, ai) + return nil +} diff --git a/vendor/ariga.io/atlas/sql/postgres/BUILD b/vendor/ariga.io/atlas/sql/postgres/BUILD new file mode 100644 index 00000000..c661bfd5 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "postgres", + srcs = [ + "convert.go", + "crdb.go", + "diff.go", + "driver.go", + "inspect.go", + "migrate.go", + "sqlspec.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/postgres", + importpath = "ariga.io/atlas/sql/postgres", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/ariga.io/atlas/sql/internal/specutil", + "//vendor/ariga.io/atlas/sql/internal/sqlx", + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/postgres/internal/postgresop", + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/ariga.io/atlas/sql/sqlclient", + "//vendor/ariga.io/atlas/sql/sqlspec", + "//vendor/github.com/hashicorp/hcl/v2/hclparse", + "//vendor/github.com/zclconf/go-cty/cty", + ], +) diff --git a/vendor/ariga.io/atlas/sql/postgres/convert.go b/vendor/ariga.io/atlas/sql/postgres/convert.go new file mode 100644 index 00000000..d12c478e --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/convert.go @@ -0,0 +1,473 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgres + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "ariga.io/atlas/sql/schema" +) + +// FormatType converts schema type to its column form in the database. +// An error is returned if the type cannot be recognized. +func FormatType(t schema.Type) (string, error) { + var f string + switch t := t.(type) { + case *ArrayType: + f = strings.ToLower(t.T) + case *BitType: + f = strings.ToLower(t.T) + // BIT without a length is equivalent to BIT(1), + // BIT VARYING has unlimited length. + if f == TypeBit && t.Len > 1 || f == TypeBitVar && t.Len > 0 { + f = fmt.Sprintf("%s(%d)", f, t.Len) + } + case *schema.BoolType: + // BOOLEAN can be abbreviated as BOOL. + if f = strings.ToLower(t.T); f == TypeBool { + f = TypeBoolean + } + case *schema.BinaryType: + f = strings.ToLower(t.T) + case *CurrencyType: + f = strings.ToLower(t.T) + case *schema.EnumType: + if t.T == "" { + return "", errors.New("postgres: missing enum type name") + } + f = t.T + case *schema.IntegerType: + switch f = strings.ToLower(t.T); f { + case TypeSmallInt, TypeInteger, TypeBigInt: + case TypeInt2: + f = TypeSmallInt + case TypeInt, TypeInt4: + f = TypeInteger + case TypeInt8: + f = TypeBigInt + } + case *IntervalType: + f = strings.ToLower(t.T) + if t.F != "" { + f += " " + strings.ToLower(t.F) + } + if t.Precision != nil && *t.Precision != defaultTimePrecision { + f += fmt.Sprintf("(%d)", *t.Precision) + } + case *schema.StringType: + switch f = strings.ToLower(t.T); f { + case TypeText, typeName: + // CHAR(n) is alias for CHARACTER(n). 
If no length was
+	// specified, the definition is equivalent to CHARACTER(1).
+	case TypeChar, TypeCharacter:
+		n := t.Size
+		if n == 0 {
+			n = 1
+		}
+		f = fmt.Sprintf("%s(%d)", TypeCharacter, n)
+	// VARCHAR(n) is an alias for CHARACTER VARYING(n). If no length
+	// was specified, the type accepts strings of any size.
+	case TypeVarChar, TypeCharVar:
+		f = TypeCharVar
+		if t.Size != 0 {
+			f = fmt.Sprintf("%s(%d)", TypeCharVar, t.Size)
+		}
+	default:
+		return "", fmt.Errorf("postgres: unexpected string type: %q", t.T)
+	}
+	case *schema.TimeType:
+		f = timeAlias(t.T)
+		if p := t.Precision; p != nil && *p != defaultTimePrecision && strings.HasPrefix(f, "time") {
+			f += fmt.Sprintf("(%d)", *p)
+		}
+	case *schema.FloatType:
+		switch f = strings.ToLower(t.T); f {
+		case TypeFloat4:
+			f = TypeReal
+		case TypeFloat8:
+			f = TypeDouble
+		case TypeFloat:
+			switch {
+			case t.Precision > 0 && t.Precision <= 24:
+				f = TypeReal
+			case t.Precision == 0 || (t.Precision > 24 && t.Precision <= 53):
+				f = TypeDouble
+			default:
+				return "", fmt.Errorf("postgres: precision for type float must be between 1 and 53: %d", t.Precision)
+			}
+		}
+	case *schema.DecimalType:
+		switch f = strings.ToLower(t.T); f {
+		case TypeNumeric:
+		// The DECIMAL type is an alias for NUMERIC.
+		case TypeDecimal:
+			f = TypeNumeric
+		default:
+			return "", fmt.Errorf("postgres: unexpected decimal type: %q", t.T)
+		}
+		switch p, s := t.Precision, t.Scale; {
+		case p == 0 && s == 0:
+		case s < 0:
+			return "", fmt.Errorf("postgres: decimal type must have scale >= 0: %d", s)
+		case p == 0 && s > 0:
+			return "", fmt.Errorf("postgres: decimal type must have precision between 1 and 1000: %d", p)
+		case s == 0:
+			f = fmt.Sprintf("%s(%d)", f, p)
+		default:
+			f = fmt.Sprintf("%s(%d,%d)", f, p, s)
+		}
+	case *SerialType:
+		switch f = strings.ToLower(t.T); f {
+		case TypeSmallSerial, TypeSerial, TypeBigSerial:
+		case TypeSerial2:
+			f = TypeSmallSerial
+		case TypeSerial4:
+			f = TypeSerial
+		case TypeSerial8:
+			f = TypeBigSerial
+		default:
+			return "", fmt.Errorf("postgres: unexpected serial type: %q", t.T)
+		}
+	case *schema.JSONType:
+		f = strings.ToLower(t.T)
+	case *schema.UUIDType:
+		f = strings.ToLower(t.T)
+	case *schema.SpatialType:
+		f = strings.ToLower(t.T)
+	case *NetworkType:
+		f = strings.ToLower(t.T)
+	case *RangeType:
+		switch f = strings.ToLower(t.T); f {
+		case TypeInt4Range, TypeInt4MultiRange, TypeInt8Range, TypeInt8MultiRange, TypeNumRange, TypeNumMultiRange,
+			TypeTSRange, TypeTSMultiRange, TypeTSTZRange, TypeTSTZMultiRange, TypeDateRange, TypeDateMultiRange:
+		default:
+			return "", fmt.Errorf("postgres: unsupported range type: %q", t.T)
+		}
+	case *OIDType:
+		switch f = strings.ToLower(t.T); f {
+		case typeOID, typeRegClass, typeRegCollation, typeRegConfig, typeRegDictionary, typeRegNamespace,
+			typeRegOper, typeRegOperator, typeRegProc, typeRegProcedure, typeRegRole, typeRegType:
+		default:
+			return "", fmt.Errorf("postgres: unsupported object identifier type: %q", t.T)
+		}
+	case *TextSearchType:
+		if f = strings.ToLower(t.T); f != TypeTSVector && f != TypeTSQuery {
+			return "", fmt.Errorf("postgres: unsupported text search type: %q", t.T)
+		}
+	case *UserDefinedType:
+		f = strings.ToLower(t.T)
+	case *XMLType:
+		f = strings.ToLower(t.T)
+	case *schema.UnsupportedType:
+		return "", fmt.Errorf("postgres: unsupported type: %q", t.T)
+	default:
+		return "", fmt.Errorf("postgres: invalid schema type: %T", t)
+	}
+	return f, nil
+}
+
+// ParseType returns the schema.Type value represented by the given raw type.
+// The raw value is expected to follow the format in PostgreSQL information schema +// or as an input for the CREATE TABLE statement. +func ParseType(typ string) (schema.Type, error) { + var ( + err error + d *columnDesc + ) + // Normalize PostgreSQL array data types from "CREATE TABLE" format to + // "INFORMATION_SCHEMA" format (i.e. as it is inspected from the database). + if t, ok := arrayType(typ); ok { + d = &columnDesc{typ: TypeArray, fmtype: t + "[]"} + } else if d, err = parseColumn(typ); err != nil { + return nil, err + } + t, err := columnType(d) + if err != nil { + return nil, err + } + // If the type is unknown (to us), we fall back to user-defined but expect + // to improve this in future versions by ensuring this against the database. + if ut, ok := t.(*schema.UnsupportedType); ok { + t = &UserDefinedType{T: ut.T} + } + return t, nil +} + +func columnType(c *columnDesc) (schema.Type, error) { + var typ schema.Type + switch t := c.typ; strings.ToLower(t) { + case TypeBigInt, TypeInt8, TypeInt, TypeInteger, TypeInt4, TypeSmallInt, TypeInt2, TypeInt64: + typ = &schema.IntegerType{T: t} + case TypeBit, TypeBitVar: + typ = &BitType{T: t, Len: c.size} + case TypeBool, TypeBoolean: + typ = &schema.BoolType{T: t} + case TypeBytea: + typ = &schema.BinaryType{T: t} + case TypeCharacter, TypeChar, TypeCharVar, TypeVarChar, TypeText, typeName: + // A `character` column without length specifier is equivalent to `character(1)`, + // but `varchar` without length accepts strings of any size (same as `text`). + typ = &schema.StringType{T: t, Size: int(c.size)} + case TypeCIDR, TypeInet, TypeMACAddr, TypeMACAddr8: + typ = &NetworkType{T: t} + case TypeCircle, TypeLine, TypeLseg, TypeBox, TypePath, TypePolygon, TypePoint, TypeGeometry: + typ = &schema.SpatialType{T: t} + case TypeDate: + typ = &schema.TimeType{T: t} + case TypeTime, TypeTimeWOTZ, TypeTimeTZ, TypeTimeWTZ, TypeTimestamp, + TypeTimestampTZ, TypeTimestampWTZ, TypeTimestampWOTZ: + p := defaultTimePrecision + if c.timePrecision != nil { + p = int(*c.timePrecision) + } + typ = &schema.TimeType{T: t, Precision: &p} + case TypeInterval: + p := defaultTimePrecision + if c.timePrecision != nil { + p = int(*c.timePrecision) + } + typ = &IntervalType{T: t, Precision: &p} + if c.interval != "" { + f, ok := intervalField(c.interval) + if !ok { + return &schema.UnsupportedType{T: c.interval}, nil + } + typ.(*IntervalType).F = f + } + case TypeReal, TypeDouble, TypeFloat, TypeFloat4, TypeFloat8: + typ = &schema.FloatType{T: t, Precision: int(c.precision)} + case TypeJSON, TypeJSONB: + typ = &schema.JSONType{T: t} + case TypeMoney: + typ = &CurrencyType{T: t} + case TypeDecimal, TypeNumeric: + typ = &schema.DecimalType{T: t, Precision: int(c.precision), Scale: int(c.scale)} + case TypeSmallSerial, TypeSerial, TypeBigSerial, TypeSerial2, TypeSerial4, TypeSerial8: + typ = &SerialType{T: t, Precision: int(c.precision)} + case TypeUUID: + typ = &schema.UUIDType{T: t} + case TypeXML: + typ = &XMLType{T: t} + case TypeArray: + // Ignore multi-dimensions or size constraints + // as they are ignored by the database. + typ = &ArrayType{T: c.fmtype} + if t, ok := arrayType(c.fmtype); ok { + tt, err := ParseType(t) + if err != nil { + return nil, err + } + if c.elemtyp == "e" { + // Override the element type in + // case it is an enum. 
+ tt = newEnumType(t, c.typelem) + } + typ.(*ArrayType).Type = tt + } + case TypeTSVector, TypeTSQuery: + typ = &TextSearchType{T: t} + case TypeInt4Range, TypeInt4MultiRange, TypeInt8Range, TypeInt8MultiRange, TypeNumRange, TypeNumMultiRange, + TypeTSRange, TypeTSMultiRange, TypeTSTZRange, TypeTSTZMultiRange, TypeDateRange, TypeDateMultiRange: + typ = &RangeType{T: t} + case typeOID, typeRegClass, typeRegCollation, typeRegConfig, typeRegDictionary, typeRegNamespace, + typeRegOper, typeRegOperator, typeRegProc, typeRegProcedure, typeRegRole, typeRegType: + typ = &OIDType{T: t} + case TypeUserDefined: + typ = &UserDefinedType{T: c.fmtype} + default: + typ = &schema.UnsupportedType{T: t} + } + switch c.typtype { + case "e": + // The `typtype` column is set to 'e' for enum types, and the + // values are filled in batch after the rows above is closed. + // https://postgresql.org/docs/current/catalog-pg-type.html + typ = newEnumType(c.fmtype, c.typid) + case "d": + // Use user-defined for domain types as we do not + // support their creation at this stage. + typ = &UserDefinedType{T: c.fmtype} + } + return typ, nil +} + +// reArray parses array declaration. See: https://postgresql.org/docs/current/arrays.html. +var reArray = regexp.MustCompile(`(?i)(.+?)(( +ARRAY( *\[[ \d]*] *)*)+|( *\[[ \d]*] *)+)$`) + +// arrayType reports if the given string is an array type (e.g. int[], text[2]), +// and returns its "udt_name" as it was inspected from the database. +func arrayType(t string) (string, bool) { + matches := reArray.FindStringSubmatch(t) + if len(matches) < 2 { + return "", false + } + return strings.TrimSpace(matches[1]), true +} + +// reInterval parses declaration of interval fields. See: https://www.postgresql.org/docs/current/datatype-datetime.html. +var reInterval = regexp.MustCompile(`(?i)(?:INTERVAL\s*)?(YEAR|MONTH|DAY|HOUR|MINUTE|SECOND|YEAR TO MONTH|DAY TO HOUR|DAY TO MINUTE|DAY TO SECOND|HOUR TO MINUTE|HOUR TO SECOND|MINUTE TO SECOND)?\s*(?:\(([0-6])\))?$`) + +// intervalField reports if the given string is an interval +// field type and returns its value (e.g. SECOND, MINUTE TO SECOND). +func intervalField(t string) (string, bool) { + matches := reInterval.FindStringSubmatch(t) + if len(matches) != 3 || matches[1] == "" { + return "", false + } + return matches[1], true +} + +// columnDesc represents a column descriptor. +type columnDesc struct { + typ string // data_type + fmtype string // pg_catalog.format_type + size int64 // character_maximum_length + typtype string // pg_type.typtype + typelem int64 // pg_type.typelem + elemtyp string // pg_type.typtype of the array element type above. 
+	typid         int64 // pg_type.oid
+	precision     int64
+	timePrecision *int64
+	scale         int64
+	parts         []string
+	interval      string
+}
+
+var reDigits = regexp.MustCompile(`\d`)
+
+func parseColumn(s string) (*columnDesc, error) {
+	parts := strings.FieldsFunc(s, func(r rune) bool {
+		return r == '(' || r == ')' || r == ' ' || r == ','
+	})
+	var (
+		err error
+		c   = &columnDesc{
+			typ:   parts[0],
+			parts: parts,
+		}
+	)
+	switch c.parts[0] {
+	case TypeVarChar, TypeCharVar, TypeChar, TypeCharacter:
+		if err := parseCharParts(c.parts, c); err != nil {
+			return nil, err
+		}
+	case TypeDecimal, TypeNumeric, TypeFloat:
+		if len(parts) > 1 {
+			c.precision, err = strconv.ParseInt(parts[1], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("postgres: parse precision %q: %w", parts[1], err)
+			}
+		}
+		if len(parts) > 2 {
+			c.scale, err = strconv.ParseInt(parts[2], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("postgres: parse scale %q: %w", parts[2], err)
+			}
+		}
+	case TypeBit:
+		if err := parseBitParts(parts, c); err != nil {
+			return nil, err
+		}
+	case TypeDouble, TypeFloat8:
+		c.precision = 53
+	case TypeReal, TypeFloat4:
+		c.precision = 24
+	case TypeTime, TypeTimeTZ, TypeTimestamp, TypeTimestampTZ:
+		t, p := s, int64(defaultTimePrecision)
+		// If the second part is only one digit it is the precision argument.
+		// For cases like "timestamp(4) with time zone" make sure to not drop
+		// the rest of the type definition.
+		if len(parts) > 1 && reDigits.MatchString(parts[1]) {
+			i, err := strconv.ParseInt(parts[1], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("postgres: parse time precision %q: %w", parts[1], err)
+			}
+			p = i
+			t = strings.Join(append(c.parts[:1], c.parts[2:]...), " ")
+		}
+		c.typ = timeAlias(t)
+		c.timePrecision = &p
+	case TypeInterval:
+		matches := reInterval.FindStringSubmatch(s)
+		c.interval = matches[1]
+		if matches[2] != "" {
+			i, err := strconv.ParseInt(matches[2], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("postgres: parse interval precision %q: %w", matches[2], err)
+			}
+			c.timePrecision = &i
+		}
+	default:
+		c.typ = s
+	}
+	return c, nil
+}
+
+func parseCharParts(parts []string, c *columnDesc) error {
+	j := strings.Join(parts, " ")
+	switch {
+	case strings.HasPrefix(j, TypeVarChar):
+		c.typ = TypeVarChar
+		parts = parts[1:]
+	case strings.HasPrefix(j, TypeCharVar):
+		c.typ = TypeCharVar
+		parts = parts[2:]
+	default:
+		parts = parts[1:]
+	}
+	if len(parts) == 0 {
+		return nil
+	}
+	size, err := strconv.ParseInt(parts[0], 10, 64)
+	if err != nil {
+		return fmt.Errorf("postgres: parse size %q: %w", parts[0], err)
+	}
+	c.size = size
+	return nil
+}
+
+func parseBitParts(parts []string, c *columnDesc) error {
+	if len(parts) == 1 {
+		c.size = 1
+		return nil
+	}
+	parts = parts[1:]
+	if parts[0] == "varying" {
+		c.typ = TypeBitVar
+		parts = parts[1:]
+	}
+	if len(parts) == 0 {
+		return nil
+	}
+	size, err := strconv.ParseInt(parts[0], 10, 64)
+	if err != nil {
+		return fmt.Errorf("postgres: parse size %q: %w", parts[0], err)
+	}
+	c.size = size
+	return nil
+}
+
+// timeAlias returns the abbreviation for the given time type.
+func timeAlias(t string) string {
+	switch t = strings.ToLower(t); t {
+	// TIMESTAMPTZ is equivalent to TIMESTAMP WITH TIME ZONE.
+	case TypeTimestampWTZ:
+		t = TypeTimestampTZ
+	// TIMESTAMP is equivalent to TIMESTAMP WITHOUT TIME ZONE.
+	case TypeTimestampWOTZ:
+		t = TypeTimestamp
+	// TIME is equivalent to TIME WITHOUT TIME ZONE.
+	case TypeTimeWOTZ:
+		t = TypeTime
+	// TIMETZ is equivalent to TIME WITH TIME ZONE.
+	case TypeTimeWTZ:
+		t = TypeTimeTZ
+	}
+	return t
+}
diff --git a/vendor/ariga.io/atlas/sql/postgres/crdb.go b/vendor/ariga.io/atlas/sql/postgres/crdb.go
new file mode 100644
index 00000000..c98735bf
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/postgres/crdb.go
@@ -0,0 +1,343 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package postgres
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/migrate"
+	"ariga.io/atlas/sql/schema"
+)
+
+type (
+	crdbDiff    struct{ diff }
+	crdbInspect struct{ inspect }
+	noLocker    interface {
+		migrate.Driver
+		migrate.Snapshoter
+		migrate.CleanChecker
+		schema.Normalizer
+	}
+	noLockDriver struct {
+		noLocker
+	}
+)
+
+var _ sqlx.DiffDriver = (*crdbDiff)(nil)
+
+// patchSchema fixes: https://github.com/cockroachdb/cockroach/issues/82040.
+func (i *crdbInspect) patchSchema(s *schema.Schema) {
+	for _, t := range s.Tables {
+		for _, c := range t.Columns {
+			id, ok := identity(c.Attrs)
+			if !ok {
+				continue
+			}
+			c.Default = nil
+			if g := strings.ToUpper(id.Generation); strings.Contains(g, "ALWAYS") {
+				id.Generation = "ALWAYS"
+			} else if strings.Contains(g, "BY DEFAULT") {
+				id.Generation = "BY DEFAULT"
+			}
+			schema.ReplaceOrAppend(&c.Attrs, id)
+		}
+	}
+}
+
+func (i *crdbInspect) InspectSchema(ctx context.Context, name string, opts *schema.InspectOptions) (*schema.Schema, error) {
+	s, err := i.inspect.InspectSchema(ctx, name, opts)
+	if err != nil {
+		return nil, err
+	}
+	i.patchSchema(s)
+	return s, err
+}
+
+func (i *crdbInspect) InspectRealm(ctx context.Context, opts *schema.InspectRealmOption) (*schema.Realm, error) {
+	r, err := i.inspect.InspectRealm(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range r.Schemas {
+		i.patchSchema(s)
+	}
+	return r, nil
+}
+
+// Normalize implements the sqlx.Normalizer.
+func (cd *crdbDiff) Normalize(from, to *schema.Table) error {
+	cd.normalize(from)
+	cd.normalize(to)
+	return nil
+}
+
+func (cd *crdbDiff) ColumnChange(fromT *schema.Table, from, to *schema.Column) (schema.ChangeKind, error) {
+	// All serial types in Cockroach are implemented as bigint.
+	// See: https://www.cockroachlabs.com/docs/stable/serial.html#generated-values-for-mode-sql_sequence-and-sql_sequence_cached.
+	for _, c := range []*schema.Column{from, to} {
+		if _, ok := c.Type.Type.(*SerialType); ok {
+			c.Type.Type = &schema.IntegerType{
+				T: TypeBigInt,
+			}
+			to.Default = nil
+			from.Default = nil
+		}
+	}
+	return cd.diff.ColumnChange(fromT, from, to)
+}
+
+func (cd *crdbDiff) normalize(table *schema.Table) {
+	if table.PrimaryKey == nil {
+		prim, ok := table.Column("rowid")
+		if !ok {
+			prim = schema.NewColumn("rowid").
+				AddAttrs(Identity{}).
+				SetType(&schema.IntegerType{T: TypeBigInt}).
+				SetDefault(&schema.RawExpr{X: "unique_rowid()"})
+			table.AddColumns(prim)
+		}
+		table.PrimaryKey = &schema.Index{
+			Name:   "primary",
+			Unique: true,
+			Table:  table,
+			Parts: []*schema.IndexPart{{
+				SeqNo: 1,
+				C:     prim,
+			}},
+		}
+	}
+	for _, c := range table.Columns {
+		if _, ok := identity(c.Attrs); ok {
+			if c.Default != nil {
+				c.Default = nil
+				continue
+			}
+		}
+		switch t := c.Type.Type.(type) {
+		// Integer types are aliased.
+		// See: cockroachlabs.com/docs/v21.2/int.html#names-and-aliases.
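+		// (Editorial example) A column declared as "int" or "integer" on
+		// CockroachDB is therefore compared as "bigint", matching how CRDB
+		// stores these aliases.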
+		case *schema.IntegerType:
+			switch t.T {
+			case TypeBigInt, TypeInteger, TypeInt8, TypeInt64, TypeInt:
+				t.T = TypeBigInt
+			case TypeInt2, TypeSmallInt:
+				t.T = TypeSmallInt
+			}
+		case *schema.JSONType:
+			switch t.T {
+			// Type json is aliased to jsonb.
+			case TypeJSON:
+				t.T = TypeJSONB
+			}
+		case *SerialType:
+			c.Default = &schema.RawExpr{
+				X: "unique_rowid()",
+			}
+		case *schema.TimeType:
+			// "timestamp" and "timestamptz" are accepted as
+			// abbreviations for timestamp with(out) time zone.
+			switch t.T {
+			case "timestamp with time zone":
+				t.T = "timestamptz"
+			case "timestamp without time zone":
+				t.T = "timestamp"
+			}
+		case *schema.FloatType:
+			// The same numeric precision is used on all platforms.
+			// See: https://www.postgresql.org/docs/current/datatype-numeric.html
+			switch {
+			case t.T == "float" && t.Precision < 25:
+				// float(1) to float(24) are selected as the "real" type.
+				t.T = "real"
+				fallthrough
+			case t.T == "real":
+				t.Precision = 24
+			case t.T == "float" && t.Precision >= 25:
+				// float(25) to float(53) are selected as the "double precision" type.
+				t.T = "double precision"
+				fallthrough
+			case t.T == "double precision":
+				t.Precision = 53
+			}
+		case *schema.StringType:
+			switch t.T {
+			case "character", "char":
+				// Character without a length specifier
+				// is equivalent to character(1).
+				t.Size = 1
+			}
+		case *enumType:
+			c.Type.Type = &schema.EnumType{T: t.T, Values: t.Values}
+		}
+	}
+}
+
+func (i *inspect) crdbIndexes(ctx context.Context, s *schema.Schema) error {
+	rows, err := i.querySchema(ctx, crdbIndexesQuery, s)
+	if err != nil {
+		return fmt.Errorf("postgres: querying schema %q indexes: %w", s.Name, err)
+	}
+	defer rows.Close()
+	if err := i.crdbAddIndexes(s, rows); err != nil {
+		return err
+	}
+	return rows.Err()
+}
+
+var reIndexType = regexp.MustCompile("(?i)USING (BTREE|GIN|GIST)")
+
+func (i *inspect) crdbAddIndexes(s *schema.Schema, rows *sql.Rows) error {
+	// Unlike Postgres, Cockroach may have duplicate index names.
+	names := make(map[string]*schema.Index)
+	for rows.Next() {
+		var (
+			uniq, primary                         bool
+			table, name, createStmt               string
+			column, contype, pred, expr, comment  sql.NullString
+		)
+		if err := rows.Scan(&table, &name, &column, &primary, &uniq, &contype, &createStmt, &pred, &expr, &comment); err != nil {
+			return fmt.Errorf("cockroach: scanning indexes for schema %q: %w", s.Name, err)
+		}
+		t, ok := s.Table(table)
+		if !ok {
+			return fmt.Errorf("table %q was not found in schema", table)
+		}
+		uniqueName := fmt.Sprintf("%s.%s", table, name)
+		idx, ok := names[uniqueName]
+		if !ok {
+			idx = &schema.Index{
+				Name:   name,
+				Unique: uniq,
+				Table:  t,
+			}
+			// Extract index type information from the index create statement.
+			// See: https://www.cockroachlabs.com/docs/stable/create-index.html.
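+			// (Editorial example) For "CREATE INDEX i ON t USING GIN (c)",
+			// reIndexType captures "GIN" and the index is annotated with
+			// &IndexType{T: "GIN"}.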
+ if parts := reIndexType.FindStringSubmatch(createStmt); len(parts) > 0 { + idx.Attrs = append(idx.Attrs, &IndexType{T: parts[1]}) + } + if sqlx.ValidString(comment) { + idx.Attrs = append(idx.Attrs, &schema.Comment{Text: comment.String}) + } + if sqlx.ValidString(contype) { + idx.Attrs = append(idx.Attrs, &Constraint{T: contype.String}) + } + if sqlx.ValidString(pred) { + idx.Attrs = append(idx.Attrs, &IndexPredicate{P: pred.String}) + } + names[uniqueName] = idx + if primary { + t.PrimaryKey = idx + } else { + t.Indexes = append(t.Indexes, idx) + } + } + part := &schema.IndexPart{SeqNo: len(idx.Parts) + 1, Desc: strings.Contains(createStmt, "DESC")} + switch { + case sqlx.ValidString(column): + part.C, ok = t.Column(column.String) + if !ok { + return fmt.Errorf("cockroach: column %q was not found for index %q", column.String, idx.Name) + } + part.C.Indexes = append(part.C.Indexes, idx) + case sqlx.ValidString(expr): + part.X = &schema.RawExpr{ + X: expr.String, + } + default: + return fmt.Errorf("cockroach: invalid part for index %q", idx.Name) + } + idx.Parts = append(idx.Parts, part) + } + return nil +} + +// CockroachDB types that are not part of PostgreSQL. +const ( + TypeInt64 = "int64" + TypeGeometry = "geometry" +) + +const ( + // CockroachDB query for getting schema indexes. + // Scanning constraints is disabled due to internal CockroachDB error. + // (internal error: unexpected type *tree.DOidWrapper for key value) + crdbIndexesQuery = ` +SELECT + t.relname AS table_name, + i.relname AS index_name, + a.attname AS column_name, + idx.indisprimary AS primary, + idx.indisunique AS unique, + NULL AS constraints, + pgi.indexdef create_stmt, + pg_get_expr(idx.indpred, idx.indrelid) AS predicate, + pg_get_indexdef(idx.indexrelid, idx.ord, false) AS expression, + pg_catalog.obj_description(i.oid, 'pg_class') AS comment + FROM + ( + select + *, + generate_series(1,array_length(i.indkey,1)) as ord, + unnest(i.indkey) AS key + from pg_index i + ) idx + JOIN pg_class i ON i.oid = idx.indexrelid + JOIN pg_class t ON t.oid = idx.indrelid + JOIN pg_namespace n ON n.oid = t.relnamespace + LEFT JOIN pg_indexes pgi ON pgi.tablename = t.relname AND indexname = i.relname AND n.nspname = pgi.schemaname + LEFT JOIN pg_attribute a ON (a.attrelid, a.attnum) = (idx.indrelid, idx.key) +WHERE + n.nspname = $1 + AND t.relname IN (%s) +ORDER BY + table_name, index_name, idx.ord +` + + crdbColumnsQuery = ` +SELECT + t1.table_name, + t1.column_name, + t1.data_type, + pg_catalog.format_type(a.atttypid, a.atttypmod) AS format_type, + t1.is_nullable, + t1.column_default, + t1.character_maximum_length, + t1.numeric_precision, + t1.datetime_precision, + t1.numeric_scale, + t1.interval_type, + t1.character_set_name, + t1.collation_name, + t1.is_identity, + t5.start_value as identity_start, + t5.increment_by as identity_increment, + t5.last_value AS identity_last, + t1.identity_generation, + t1.generation_expression, + col_description(t3.oid, "ordinal_position") AS comment, + t4.typtype, + t4.typelem, + (CASE WHEN t4.typcategory = 'A' AND t4.typelem <> 0 THEN (SELECT t.typtype FROM pg_catalog.pg_type t WHERE t.oid = t4.typelem) END) AS elemtyp, + t4.oid +FROM + "information_schema"."columns" AS t1 + JOIN pg_catalog.pg_namespace AS t2 ON t2.nspname = t1.table_schema + JOIN pg_catalog.pg_class AS t3 ON t3.relnamespace = t2.oid AND t3.relname = t1.table_name + JOIN pg_catalog.pg_attribute AS a ON a.attrelid = t3.oid AND a.attname = t1.column_name + LEFT JOIN pg_catalog.pg_type AS t4 + ON t1.udt_name = t4.typname + LEFT 
JOIN pg_sequences AS t5 + ON quote_ident(t5.schemaname) || '.' || quote_ident(t5.sequencename) = btrim(btrim(t1.column_default, 'nextval('''), '''::REGCLASS)') +WHERE + t1.table_schema = $1 AND t1.table_name IN (%s) +ORDER BY + t1.table_name, t1.ordinal_position +` +) diff --git a/vendor/ariga.io/atlas/sql/postgres/diff.go b/vendor/ariga.io/atlas/sql/postgres/diff.go new file mode 100644 index 00000000..86d8d669 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/diff.go @@ -0,0 +1,427 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgres + +import ( + "context" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/schema" +) + +// DefaultDiff provides basic diffing capabilities for PostgreSQL dialects. +// Note, it is recommended to call Open, create a new Driver and use its Differ +// when a database connection is available. +var DefaultDiff schema.Differ = &sqlx.Diff{DiffDriver: &diff{}} + +// A diff provides a PostgreSQL implementation for sqlx.DiffDriver. +type diff struct{ conn } + +// SchemaAttrDiff returns a changeset for migrating schema attributes from one state to the other. +func (d *diff) SchemaAttrDiff(_, _ *schema.Schema) []schema.Change { + // No special schema attribute diffing for PostgreSQL. + return nil +} + +// TableAttrDiff returns a changeset for migrating table attributes from one state to the other. +func (d *diff) TableAttrDiff(from, to *schema.Table) ([]schema.Change, error) { + var changes []schema.Change + if change := sqlx.CommentDiff(from.Attrs, to.Attrs); change != nil { + changes = append(changes, change) + } + if err := d.partitionChanged(from, to); err != nil { + return nil, err + } + return append(changes, sqlx.CheckDiff(from, to, func(c1, c2 *schema.Check) bool { + return sqlx.Has(c1.Attrs, &NoInherit{}) == sqlx.Has(c2.Attrs, &NoInherit{}) + })...), nil +} + +// ColumnChange returns the schema changes (if any) for migrating one column to the other. +func (d *diff) ColumnChange(_ *schema.Table, from, to *schema.Column) (schema.ChangeKind, error) { + change := sqlx.CommentChange(from.Attrs, to.Attrs) + if from.Type.Null != to.Type.Null { + change |= schema.ChangeNull + } + changed, err := d.typeChanged(from, to) + if err != nil { + return schema.NoChange, err + } + if changed { + change |= schema.ChangeType + } + if changed, err = d.defaultChanged(from, to); err != nil { + return schema.NoChange, err + } + if changed { + change |= schema.ChangeDefault + } + if identityChanged(from.Attrs, to.Attrs) { + change |= schema.ChangeAttr + } + if changed, err = d.generatedChanged(from, to); err != nil { + return schema.NoChange, err + } + if changed { + change |= schema.ChangeGenerated + } + return change, nil +} + +// defaultChanged reports if the default value of a column was changed. 
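+// For example (editorial note): "'active'::text" and "'active'" are reported
+// as equal, since trimCast strips the cast before the comparison.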
+func (d *diff) defaultChanged(from, to *schema.Column) (bool, error) {
+	d1, ok1 := sqlx.DefaultValue(from)
+	d2, ok2 := sqlx.DefaultValue(to)
+	if ok1 != ok2 {
+		return true, nil
+	}
+	if !ok1 && !ok2 || trimCast(d1) == trimCast(d2) || quote(d1) == quote(d2) {
+		return false, nil
+	}
+	var (
+		_, fromX = from.Default.(*schema.RawExpr)
+		_, toX   = to.Default.(*schema.RawExpr)
+	)
+	// In case one of the DEFAULT values is an expression, and a database connection is
+	// available (not DefaultDiff), we use the database to compare in case of mismatch.
+	//
+	//	SELECT ARRAY[1] = '{1}'::int[]
+	//	SELECT lower('X'::text) = lower('X')
+	//
+	if (fromX || toX) && d.conn.ExecQuerier != nil {
+		equals, err := d.defaultEqual(from.Default, to.Default)
+		return !equals, err
+	}
+	return true, nil
+}
+
+// generatedChanged reports if the generated expression of a column was changed.
+func (*diff) generatedChanged(from, to *schema.Column) (bool, error) {
+	var fromX, toX schema.GeneratedExpr
+	switch fromHas, toHas := sqlx.Has(from.Attrs, &fromX), sqlx.Has(to.Attrs, &toX); {
+	case fromHas && toHas && sqlx.MayWrap(fromX.Expr) != sqlx.MayWrap(toX.Expr):
+		return false, fmt.Errorf("changing the generation expression for a column %q is not supported", from.Name)
+	case !fromHas && toHas:
+		return false, fmt.Errorf("changing column %q to generated column is not supported (drop and add is required)", from.Name)
+	default:
+		// Only DROP EXPRESSION is supported.
+		return fromHas && !toHas, nil
+	}
+}
+
+// partitionChanged checks and returns an error if the partition key of a table was changed.
+func (*diff) partitionChanged(from, to *schema.Table) error {
+	var fromP, toP Partition
+	switch fromHas, toHas := sqlx.Has(from.Attrs, &fromP), sqlx.Has(to.Attrs, &toP); {
+	case fromHas && !toHas:
+		return fmt.Errorf("partition key cannot be dropped from %q (drop and add is required)", from.Name)
+	case !fromHas && toHas:
+		return fmt.Errorf("partition key cannot be added to %q (drop and add is required)", to.Name)
+	case fromHas && toHas:
+		s1, err := formatPartition(fromP)
+		if err != nil {
+			return err
+		}
+		s2, err := formatPartition(toP)
+		if err != nil {
+			return err
+		}
+		if s1 != s2 {
+			return fmt.Errorf("partition key of table %q cannot be changed from %s to %s (drop and add is required)", to.Name, s1, s2)
+		}
+	}
+	return nil
+}
+
+// IsGeneratedIndexName reports if the index name was generated by the database.
+func (d *diff) IsGeneratedIndexName(t *schema.Table, idx *schema.Index) bool {
+	names := make([]string, len(idx.Parts))
+	for i, p := range idx.Parts {
+		if p.C == nil {
+			return false
+		}
+		names[i] = p.C.Name
+	}
+	// Auto-generated index names have the following format: <table>_<column_1>_..._<column_n>_key.
+	// In case of conflict, PostgreSQL appends a running number to the end (e.g. "key1").
+	p := fmt.Sprintf("%s_%s_key", t.Name, strings.Join(names, "_"))
+	if idx.Name == p {
+		return true
+	}
+	i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, p), 10, 64)
+	return err == nil && i > 0
+}
+
+// IndexAttrChanged reports if the index attributes were changed.
+// The default type is BTREE if no type was specified.
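+// For example (editorial note): attributes without an IndexType and attributes
+// holding &IndexType{T: "btree"} compare as equal types, since both normalize
+// to BTREE.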
+func (*diff) IndexAttrChanged(from, to []schema.Attr) bool { + t1 := &IndexType{T: IndexTypeBTree} + if sqlx.Has(from, t1) { + t1.T = strings.ToUpper(t1.T) + } + t2 := &IndexType{T: IndexTypeBTree} + if sqlx.Has(to, t2) { + t2.T = strings.ToUpper(t2.T) + } + if t1.T != t2.T { + return true + } + var p1, p2 IndexPredicate + if sqlx.Has(from, &p1) != sqlx.Has(to, &p2) || (p1.P != p2.P && p1.P != sqlx.MayWrap(p2.P)) { + return true + } + if indexIncludeChanged(from, to) { + return true + } + s1, ok1 := indexStorageParams(from) + s2, ok2 := indexStorageParams(to) + return ok1 != ok2 || ok1 && *s1 != *s2 +} + +// IndexPartAttrChanged reports if the index-part attributes were changed. +func (*diff) IndexPartAttrChanged(fromI, toI *schema.Index, i int) bool { + from, to := fromI.Parts[i], toI.Parts[i] + p1 := &IndexColumnProperty{NullsFirst: from.Desc, NullsLast: !from.Desc} + sqlx.Has(from.Attrs, p1) + p2 := &IndexColumnProperty{NullsFirst: to.Desc, NullsLast: !to.Desc} + sqlx.Has(to.Attrs, p2) + if p1.NullsFirst != p2.NullsFirst || p1.NullsLast != p2.NullsLast { + return true + } + var fromOp, toOp IndexOpClass + switch fromHas, toHas := sqlx.Has(from.Attrs, &fromOp), sqlx.Has(to.Attrs, &toOp); { + case fromHas && toHas: + return !fromOp.Equal(&toOp) + case toHas: + // Report a change if a non-default operator class was added. + d, err := toOp.DefaultFor(toI, toI.Parts[i]) + return !d && err == nil + case fromHas: + // Report a change if a non-default operator class was removed. + d, err := fromOp.DefaultFor(fromI, fromI.Parts[i]) + return !d && err == nil + default: + return false + } +} + +// ReferenceChanged reports if the foreign key referential action was changed. +func (*diff) ReferenceChanged(from, to schema.ReferenceOption) bool { + // According to PostgreSQL, the NO ACTION rule is set + // if no referential action was defined in foreign key. + if from == "" { + from = schema.NoAction + } + if to == "" { + to = schema.NoAction + } + return from != to +} + +func (d *diff) typeChanged(from, to *schema.Column) (bool, error) { + fromT, toT := from.Type.Type, to.Type.Type + if fromT == nil || toT == nil { + return false, fmt.Errorf("postgres: missing type information for column %q", from.Name) + } + if reflect.TypeOf(fromT) != reflect.TypeOf(toT) { + return true, nil + } + var changed bool + switch fromT := fromT.(type) { + case *schema.BinaryType, *BitType, *schema.BoolType, *schema.DecimalType, *schema.FloatType, *IntervalType, + *schema.IntegerType, *schema.JSONType, *OIDType, *RangeType, *SerialType, *schema.SpatialType, + *schema.StringType, *schema.TimeType, *TextSearchType, *NetworkType, *UserDefinedType, *schema.UUIDType: + t1, err := FormatType(toT) + if err != nil { + return false, err + } + t2, err := FormatType(fromT) + if err != nil { + return false, err + } + changed = t1 != t2 + case *schema.EnumType: + toT := toT.(*schema.EnumType) + // Column type was changed if the underlying enum type was changed or values are not equal. + changed = !sqlx.ValuesEqual(fromT.Values, toT.Values) || fromT.T != toT.T || + (toT.Schema != nil && fromT.Schema != nil && fromT.Schema.Name != toT.Schema.Name) + case *CurrencyType: + toT := toT.(*CurrencyType) + changed = fromT.T != toT.T + case *XMLType: + toT := toT.(*XMLType) + changed = fromT.T != toT.T + case *ArrayType: + toT := toT.(*ArrayType) + // Same type. + if changed = fromT.T != toT.T; !changed { + // In case it is an enum type, compare its values. 
+ fromE, ok1 := fromT.Type.(*schema.EnumType) + toE, ok2 := toT.Type.(*schema.EnumType) + changed = ok1 && ok2 && !sqlx.ValuesEqual(fromE.Values, toE.Values) + break + } + // In case the desired schema is not normalized, the string type can look different even + // if the two strings represent the same array type (varchar(1), character varying (1)). + // Therefore, we try by comparing the underlying types if they were defined. + if fromT.Type != nil && toT.Type != nil { + t1, err := FormatType(fromT.Type) + if err != nil { + return false, err + } + t2, err := FormatType(toT.Type) + if err != nil { + return false, err + } + // Same underlying type. + changed = t1 != t2 + } + default: + return false, &sqlx.UnsupportedTypeError{Type: fromT} + } + return changed, nil +} + +// defaultEqual reports if the DEFAULT values x and y +// equal according to the database engine. +func (d *diff) defaultEqual(from, to schema.Expr) (bool, error) { + var ( + b bool + d1, d2 string + ) + switch from := from.(type) { + case *schema.Literal: + d1 = quote(from.V) + case *schema.RawExpr: + d1 = from.X + } + switch to := to.(type) { + case *schema.Literal: + d2 = quote(to.V) + case *schema.RawExpr: + d2 = to.X + } + // The DEFAULT expressions are safe to be inlined in the SELECT + // statement same as we inline them in the CREATE TABLE statement. + rows, err := d.QueryContext(context.Background(), fmt.Sprintf("SELECT %s = %s", d1, d2)) + if err != nil { + return false, err + } + if err := sqlx.ScanOne(rows, &b); err != nil { + return false, err + } + return b, nil +} + +// Default IDENTITY attributes. +const ( + defaultIdentityGen = "BY DEFAULT" + defaultSeqStart = 1 + defaultSeqIncrement = 1 +) + +// identityChanged reports if one of the identity attributes was changed. +func identityChanged(from, to []schema.Attr) bool { + i1, ok1 := identity(from) + i2, ok2 := identity(to) + if !ok1 && !ok2 || ok1 != ok2 { + return ok1 != ok2 + } + return i1.Generation != i2.Generation || i1.Sequence.Start != i2.Sequence.Start || i1.Sequence.Increment != i2.Sequence.Increment +} + +func identity(attrs []schema.Attr) (*Identity, bool) { + i := &Identity{} + if !sqlx.Has(attrs, i) { + return nil, false + } + if i.Generation == "" { + i.Generation = defaultIdentityGen + } + if i.Sequence == nil { + i.Sequence = &Sequence{Start: defaultSeqStart, Increment: defaultSeqIncrement} + return i, true + } + if i.Sequence.Start == 0 { + i.Sequence.Start = defaultSeqStart + } + if i.Sequence.Increment == 0 { + i.Sequence.Increment = defaultSeqIncrement + } + return i, true +} + +// formatPartition returns the string representation of the +// partition key according to the PostgreSQL format/grammar. +func formatPartition(p Partition) (string, error) { + b := &sqlx.Builder{QuoteChar: '"'} + b.P("PARTITION BY") + switch t := strings.ToUpper(p.T); t { + case PartitionTypeRange, PartitionTypeList, PartitionTypeHash: + b.P(t) + default: + return "", fmt.Errorf("unknown partition type: %q", t) + } + if len(p.Parts) == 0 { + return "", errors.New("missing parts for partition key") + } + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(p.Parts, func(i int, b *sqlx.Builder) { + switch k := p.Parts[i]; { + case k.C != nil: + b.Ident(k.C.Name) + case k.X != nil: + b.P(sqlx.MayWrap(k.X.(*schema.RawExpr).X)) + } + }) + }) + return b.String(), nil +} + +// indexStorageParams returns the index storage parameters from the attributes +// in case it is there, and it is not the default. 
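+// (Editorial example) &IndexStorageParams{PagesPerRange: 64} is reported as a
+// non-default parameter, while a zero or default pages_per_range is treated as
+// absent.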
+func indexStorageParams(attrs []schema.Attr) (*IndexStorageParams, bool) {
+	s := &IndexStorageParams{}
+	if !sqlx.Has(attrs, s) {
+		return nil, false
+	}
+	if !s.AutoSummarize && (s.PagesPerRange == 0 || s.PagesPerRange == defaultPagePerRange) {
+		return nil, false
+	}
+	return s, true
+}
+
+// indexIncludeChanged reports if the INCLUDE attribute clause was changed.
+func indexIncludeChanged(from, to []schema.Attr) bool {
+	var fromI, toI IndexInclude
+	if sqlx.Has(from, &fromI) != sqlx.Has(to, &toI) || len(fromI.Columns) != len(toI.Columns) {
+		return true
+	}
+	for i := range fromI.Columns {
+		if fromI.Columns[i].Name != toI.Columns[i].Name {
+			return true
+		}
+	}
+	return false
+}
+
+func trimCast(s string) string {
+	i := strings.LastIndex(s, "::")
+	if i == -1 {
+		return s
+	}
+	for _, r := range s[i+2:] {
+		if r != ' ' && !unicode.IsLetter(r) {
+			return s
+		}
+	}
+	return s[:i]
+}
diff --git a/vendor/ariga.io/atlas/sql/postgres/driver.go b/vendor/ariga.io/atlas/sql/postgres/driver.go
new file mode 100644
index 00000000..a1194245
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/postgres/driver.go
@@ -0,0 +1,458 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package postgres
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"net/url"
+	"strconv"
+	"time"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/migrate"
+	"ariga.io/atlas/sql/schema"
+	"ariga.io/atlas/sql/sqlclient"
+)
+
+type (
+	// Driver represents a PostgreSQL driver for introspecting database schemas,
+	// generating diffs between schema elements and applying migration changes.
+	Driver struct {
+		conn
+		schema.Differ
+		schema.Inspector
+		migrate.PlanApplier
+	}
+
+	// database connection and its information.
+	conn struct {
+		schema.ExecQuerier
+		// The schema in the `search_path` parameter (if given).
+		schema string
+		// System variables that are set on `Open`.
+		collate string
+		ctype   string
+		version int
+		crdb    bool
+	}
+)
+
+// DriverName holds the name used for registration.
+const DriverName = "postgres"
+
+func init() {
+	sqlclient.Register(
+		DriverName,
+		sqlclient.OpenerFunc(opener),
+		sqlclient.RegisterDriverOpener(Open),
+		sqlclient.RegisterFlavours("postgresql"),
+		sqlclient.RegisterCodec(MarshalHCL, EvalHCL),
+		sqlclient.RegisterURLParser(parser{}),
+	)
+}
+
+func opener(_ context.Context, u *url.URL) (*sqlclient.Client, error) {
+	ur := parser{}.ParseURL(u)
+	db, err := sql.Open(DriverName, ur.DSN)
+	if err != nil {
+		return nil, err
+	}
+	drv, err := Open(db)
+	if err != nil {
+		if cerr := db.Close(); cerr != nil {
+			err = fmt.Errorf("%w: %v", err, cerr)
+		}
+		return nil, err
+	}
+	switch drv := drv.(type) {
+	case *Driver:
+		drv.schema = ur.Schema
+	case noLockDriver:
+		drv.noLocker.(*Driver).schema = ur.Schema
+	}
+	return &sqlclient.Client{
+		Name:   DriverName,
+		DB:     db,
+		URL:    ur,
+		Driver: drv,
+	}, nil
+}
+
+// Open opens a new PostgreSQL driver.
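+// A minimal usage sketch (editorial illustration; registration of the
+// "postgres" database/sql driver via lib/pq or the pgx stdlib is an
+// assumption of the example):
+//
+//	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/test")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	drv, err := postgres.Open(db)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// drv implements migrate.Driver: it can inspect, diff and plan changes.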
+func Open(db schema.ExecQuerier) (migrate.Driver, error) {
+	c := conn{ExecQuerier: db}
+	rows, err := db.QueryContext(context.Background(), paramsQuery)
+	if err != nil {
+		return nil, fmt.Errorf("postgres: scanning system variables: %w", err)
+	}
+	params, err := sqlx.ScanStrings(rows)
+	if err != nil {
+		return nil, fmt.Errorf("postgres: failed scanning rows: %w", err)
+	}
+	if len(params) != 3 && len(params) != 4 {
+		return nil, fmt.Errorf("postgres: unexpected number of rows: %d", len(params))
+	}
+	c.ctype, c.collate = params[1], params[2]
+	if c.version, err = strconv.Atoi(params[0]); err != nil {
+		return nil, fmt.Errorf("postgres: malformed version: %s: %w", params[0], err)
+	}
+	if c.version < 10_00_00 {
+		return nil, fmt.Errorf("postgres: unsupported postgres version: %d", c.version)
+	}
+	// This means we are connected to CockroachDB, because we have a result for name='crdb_version'. See `paramsQuery`.
+	if c.crdb = len(params) == 4; c.crdb {
+		return noLockDriver{
+			&Driver{
+				conn:        c,
+				Differ:      &sqlx.Diff{DiffDriver: &crdbDiff{diff{c}}},
+				Inspector:   &crdbInspect{inspect{c}},
+				PlanApplier: &planApply{c},
+			},
+		}, nil
+	}
+	return &Driver{
+		conn:        c,
+		Differ:      &sqlx.Diff{DiffDriver: &diff{c}},
+		Inspector:   &inspect{c},
+		PlanApplier: &planApply{c},
+	}, nil
+}
+
+func (d *Driver) dev() *sqlx.DevDriver {
+	return &sqlx.DevDriver{
+		Driver:     d,
+		MaxNameLen: 63,
+		PatchColumn: func(s *schema.Schema, c *schema.Column) {
+			if e, ok := hasEnumType(c); ok {
+				e.Schema = s
+			}
+		},
+	}
+}
+
+// NormalizeRealm returns the normal representation of the given database.
+func (d *Driver) NormalizeRealm(ctx context.Context, r *schema.Realm) (*schema.Realm, error) {
+	return d.dev().NormalizeRealm(ctx, r)
+}
+
+// NormalizeSchema returns the normal representation of the given database.
+func (d *Driver) NormalizeSchema(ctx context.Context, s *schema.Schema) (*schema.Schema, error) {
+	return d.dev().NormalizeSchema(ctx, s)
+}
+
+// Lock implements the schema.Locker interface.
+func (d *Driver) Lock(ctx context.Context, name string, timeout time.Duration) (schema.UnlockFunc, error) {
+	conn, err := sqlx.SingleConn(ctx, d.ExecQuerier)
+	if err != nil {
+		return nil, err
+	}
+	h := fnv.New32()
+	h.Write([]byte(name))
+	id := h.Sum32()
+	if err := acquire(ctx, conn, id, timeout); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return func() error {
+		defer conn.Close()
+		rows, err := conn.QueryContext(ctx, "SELECT pg_advisory_unlock($1)", id)
+		if err != nil {
+			return err
+		}
+		switch released, err := sqlx.ScanNullBool(rows); {
+		case err != nil:
+			return err
+		case !released.Valid || !released.Bool:
+			return fmt.Errorf("sql/postgres: failed releasing lock %d", id)
+		}
+		return nil
+	}, nil
+}
+
+// Snapshot implements migrate.Snapshoter.
+func (d *Driver) Snapshot(ctx context.Context) (migrate.RestoreFunc, error) {
+	// Postgres is considered bound to a schema only if the `search_path` parameter was given.
+	// In all other cases, the connection is considered bound to the realm.
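+	// (Editorial example) Opening "postgres://localhost:5432/db?search_path=public"
+	// binds the driver to the "public" schema; without search_path, the snapshot
+	// covers the entire realm.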
+ if d.schema != "" { + s, err := d.InspectSchema(ctx, d.schema, nil) + if err != nil { + return nil, err + } + if len(s.Tables) > 0 { + return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in connected schema", s.Tables[0].Name)} + } + return func(ctx context.Context) error { + current, err := d.InspectSchema(ctx, s.Name, nil) + if err != nil { + return err + } + changes, err := d.SchemaDiff(current, s) + if err != nil { + return err + } + return d.ApplyChanges(ctx, withCascade(changes)) + }, nil + } + // Not bound to a schema. + realm, err := d.InspectRealm(ctx, nil) + if err != nil { + return nil, err + } + restore := func(ctx context.Context) error { + current, err := d.InspectRealm(ctx, nil) + if err != nil { + return err + } + changes, err := d.RealmDiff(current, realm) + if err != nil { + return err + } + return d.ApplyChanges(ctx, withCascade(changes)) + } + // Postgres is considered clean, if there are no schemas or the public schema has no tables. + if len(realm.Schemas) == 0 { + return restore, nil + } + if s, ok := realm.Schema("public"); len(realm.Schemas) == 1 && ok { + if len(s.Tables) > 0 { + return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in schema %q", s.Tables[0].Name, s.Name)} + } + return restore, nil + } + return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found schema %q", realm.Schemas[0].Name)} +} + +func withCascade(changes schema.Changes) schema.Changes { + for _, c := range changes { + if d, ok := c.(*schema.DropTable); ok { + d.Extra = append(d.Extra, &Cascade{}) + } + } + return changes +} + +// CheckClean implements migrate.CleanChecker. +func (d *Driver) CheckClean(ctx context.Context, revT *migrate.TableIdent) error { + if revT == nil { // accept nil values + revT = &migrate.TableIdent{} + } + if d.schema != "" { + switch s, err := d.InspectSchema(ctx, d.schema, nil); { + case err != nil: + return err + case len(s.Tables) == 0, (revT.Schema == "" || s.Name == revT.Schema) && len(s.Tables) == 1 && s.Tables[0].Name == revT.Name: + return nil + default: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in schema %q", s.Tables[0].Name, s.Name)} + } + } + r, err := d.InspectRealm(ctx, nil) + if err != nil { + return err + } + for _, s := range r.Schemas { + switch { + case len(s.Tables) == 0 && s.Name == "public": + case len(s.Tables) == 0 || s.Name != revT.Schema: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found schema %q", s.Name)} + case len(s.Tables) > 1: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found %d tables in schema %q", len(s.Tables), s.Name)} + case len(s.Tables) == 1 && s.Tables[0].Name != revT.Name: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q in schema %q", s.Tables[0].Name, s.Name)} + } + } + return nil +} + +// Version returns the version of the connected database. +func (d *Driver) Version() string { + return strconv.Itoa(d.conn.version) +} + +func acquire(ctx context.Context, conn schema.ExecQuerier, id uint32, timeout time.Duration) error { + switch { + // With timeout (context-based). + case timeout > 0: + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + fallthrough + // Infinite timeout. + case timeout < 0: + rows, err := conn.QueryContext(ctx, "SELECT pg_advisory_lock($1)", id) + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + err = schema.ErrLocked + } + if err != nil { + return err + } + return rows.Close() + // No timeout. 
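+	// (timeout == 0): a single pg_try_advisory_lock attempt that returns schema.ErrLocked + // right away if the lock is already held by another session.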
+ default: + rows, err := conn.QueryContext(ctx, "SELECT pg_try_advisory_lock($1)", id) + if err != nil { + return err + } + acquired, err := sqlx.ScanNullBool(rows) + if err != nil { + return err + } + if !acquired.Bool { + return schema.ErrLocked + } + return nil + } +} + +// supportsIndexInclude reports if the server supports the INCLUDE clause. +func (c *conn) supportsIndexInclude() bool { + return c.version >= 11_00_00 +} + +type parser struct{} + +// ParseURL implements the sqlclient.URLParser interface. +func (parser) ParseURL(u *url.URL) *sqlclient.URL { + return &sqlclient.URL{URL: u, DSN: u.String(), Schema: u.Query().Get("search_path")} +} + +// ChangeSchema implements the sqlclient.SchemaChanger interface. +func (parser) ChangeSchema(u *url.URL, s string) *url.URL { + nu := *u + q := nu.Query() + q.Set("search_path", s) + nu.RawQuery = q.Encode() + return &nu +} + +// Standard column types (and their aliases) as defined in +// PostgreSQL codebase/website. +const ( + TypeBit = "bit" + TypeBitVar = "bit varying" + TypeBoolean = "boolean" + TypeBool = "bool" // boolean. + TypeBytea = "bytea" + + TypeCharacter = "character" + TypeChar = "char" // character + TypeCharVar = "character varying" + TypeVarChar = "varchar" // character varying + TypeText = "text" + typeName = "name" // internal type for object names + + TypeSmallInt = "smallint" + TypeInteger = "integer" + TypeBigInt = "bigint" + TypeInt = "int" // integer. + TypeInt2 = "int2" // smallint. + TypeInt4 = "int4" // integer. + TypeInt8 = "int8" // bigint. + + TypeCIDR = "cidr" + TypeInet = "inet" + TypeMACAddr = "macaddr" + TypeMACAddr8 = "macaddr8" + + TypeCircle = "circle" + TypeLine = "line" + TypeLseg = "lseg" + TypeBox = "box" + TypePath = "path" + TypePolygon = "polygon" + TypePoint = "point" + + TypeDate = "date" + TypeTime = "time" // time without time zone + TypeTimeTZ = "timetz" // time with time zone + TypeTimeWTZ = "time with time zone" + TypeTimeWOTZ = "time without time zone" + TypeTimestamp = "timestamp" // timestamp without time zone + TypeTimestampTZ = "timestamptz" + TypeTimestampWTZ = "timestamp with time zone" + TypeTimestampWOTZ = "timestamp without time zone" + + TypeDouble = "double precision" + TypeReal = "real" + TypeFloat8 = "float8" // double precision + TypeFloat4 = "float4" // real + TypeFloat = "float" // float(p). + + TypeNumeric = "numeric" + TypeDecimal = "decimal" // numeric + + TypeSmallSerial = "smallserial" // smallint with auto_increment. + TypeSerial = "serial" // integer with auto_increment. + TypeBigSerial = "bigserial" // bigint with auto_increment. + TypeSerial2 = "serial2" // smallserial + TypeSerial4 = "serial4" // serial + TypeSerial8 = "serial8" // bigserial + + TypeArray = "array" + TypeXML = "xml" + TypeJSON = "json" + TypeJSONB = "jsonb" + TypeUUID = "uuid" + TypeMoney = "money" + TypeInterval = "interval" + TypeTSQuery = "tsquery" + TypeTSVector = "tsvector" + TypeUserDefined = "user-defined" + + TypeInt4Range = "int4range" + TypeInt4MultiRange = "int4multirange" + TypeInt8Range = "int8range" + TypeInt8MultiRange = "int8multirange" + TypeNumRange = "numrange" + TypeNumMultiRange = "nummultirange" + TypeTSRange = "tsrange" + TypeTSMultiRange = "tsmultirange" + TypeTSTZRange = "tstzrange" + TypeTSTZMultiRange = "tstzmultirange" + TypeDateRange = "daterange" + TypeDateMultiRange = "datemultirange" + + // PostgreSQL internal object types and their aliases. 
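+	// See https://www.postgresql.org/docs/current/datatype-oid.html for details.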
+ typeOID = "oid" + typeRegClass = "regclass" + typeRegCollation = "regcollation" + typeRegConfig = "regconfig" + typeRegDictionary = "regdictionary" + typeRegNamespace = "regnamespace" + typeRegOper = "regoper" + typeRegOperator = "regoperator" + typeRegProc = "regproc" + typeRegProcedure = "regprocedure" + typeRegRole = "regrole" + typeRegType = "regtype" +) + +// List of supported index types. +const ( + IndexTypeBTree = "BTREE" + IndexTypeBRIN = "BRIN" + IndexTypeHash = "HASH" + IndexTypeGIN = "GIN" + IndexTypeGiST = "GIST" + IndexTypeSPGiST = "SPGIST" + defaultPagePerRange = 128 +) + +// List of "GENERATED" types. +const ( + GeneratedTypeAlways = "ALWAYS" + GeneratedTypeByDefault = "BY_DEFAULT" // BY DEFAULT. +) + +// List of PARTITION KEY types. +const ( + PartitionTypeRange = "RANGE" + PartitionTypeList = "LIST" + PartitionTypeHash = "HASH" +) diff --git a/vendor/ariga.io/atlas/sql/postgres/inspect.go b/vendor/ariga.io/atlas/sql/postgres/inspect.go new file mode 100644 index 00000000..89ea7c41 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/inspect.go @@ -0,0 +1,1277 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgres + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + "sync" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/postgres/internal/postgresop" + "ariga.io/atlas/sql/schema" +) + +// A diff provides a PostgreSQL implementation for schema.Inspector. +type inspect struct{ conn } + +var _ schema.Inspector = (*inspect)(nil) + +// InspectRealm returns schema descriptions of all resources in the given realm. +func (i *inspect) InspectRealm(ctx context.Context, opts *schema.InspectRealmOption) (*schema.Realm, error) { + schemas, err := i.schemas(ctx, opts) + if err != nil { + return nil, err + } + if opts == nil { + opts = &schema.InspectRealmOption{} + } + r := schema.NewRealm(schemas...).SetCollation(i.collate) + r.Attrs = append(r.Attrs, &CType{V: i.ctype}) + if len(schemas) == 0 || !sqlx.ModeInspectRealm(opts).Is(schema.InspectTables) { + return sqlx.ExcludeRealm(r, opts.Exclude) + } + if err := i.inspectTables(ctx, r, nil); err != nil { + return nil, err + } + sqlx.LinkSchemaTables(schemas) + return sqlx.ExcludeRealm(r, opts.Exclude) +} + +// InspectSchema returns schema descriptions of the tables in the given schema. +// If the schema name is empty, the result will be the attached schema. 
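+// For example, InspectSchema(ctx, "", nil) resolves the schema reported by + // CURRENT_SCHEMA() on the server side.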
+func (i *inspect) InspectSchema(ctx context.Context, name string, opts *schema.InspectOptions) (s *schema.Schema, err error) { + schemas, err := i.schemas(ctx, &schema.InspectRealmOption{Schemas: []string{name}}) + if err != nil { + return nil, err + } + switch n := len(schemas); { + case n == 0: + return nil, &schema.NotExistError{Err: fmt.Errorf("postgres: schema %q was not found", name)} + case n > 1: + return nil, fmt.Errorf("postgres: %d schemas were found for %q", n, name) + } + if opts == nil { + opts = &schema.InspectOptions{} + } + r := schema.NewRealm(schemas...).SetCollation(i.collate) + r.Attrs = append(r.Attrs, &CType{V: i.ctype}) + if sqlx.ModeInspectSchema(opts).Is(schema.InspectTables) { + if err := i.inspectTables(ctx, r, opts); err != nil { + return nil, err + } + sqlx.LinkSchemaTables(schemas) + } + return sqlx.ExcludeSchema(r.Schemas[0], opts.Exclude) +} + +func (i *inspect) inspectTables(ctx context.Context, r *schema.Realm, opts *schema.InspectOptions) error { + if err := i.tables(ctx, r, opts); err != nil { + return err + } + for _, s := range r.Schemas { + if len(s.Tables) == 0 { + continue + } + if err := i.columns(ctx, s); err != nil { + return err + } + if err := i.indexes(ctx, s); err != nil { + return err + } + if err := i.partitions(s); err != nil { + return err + } + if err := i.fks(ctx, s); err != nil { + return err + } + if err := i.checks(ctx, s); err != nil { + return err + } + } + return nil +} + +// table returns the table from the database, or a NotExistError if the table was not found. +func (i *inspect) tables(ctx context.Context, realm *schema.Realm, opts *schema.InspectOptions) error { + var ( + args []any + query = fmt.Sprintf(tablesQuery, nArgs(0, len(realm.Schemas))) + ) + for _, s := range realm.Schemas { + args = append(args, s.Name) + } + if opts != nil && len(opts.Tables) > 0 { + for _, t := range opts.Tables { + args = append(args, t) + } + query = fmt.Sprintf(tablesQueryArgs, nArgs(0, len(realm.Schemas)), nArgs(len(realm.Schemas), len(opts.Tables))) + } + rows, err := i.QueryContext(ctx, query, args...) + if err != nil { + return err + } + defer rows.Close() + for rows.Next() { + var tSchema, name, comment, partattrs, partstart, partexprs sql.NullString + if err := rows.Scan(&tSchema, &name, &comment, &partattrs, &partstart, &partexprs); err != nil { + return fmt.Errorf("scan table information: %w", err) + } + if !sqlx.ValidString(tSchema) || !sqlx.ValidString(name) { + return fmt.Errorf("invalid schema or table name: %q.%q", tSchema.String, name.String) + } + s, ok := realm.Schema(tSchema.String) + if !ok { + return fmt.Errorf("schema %q was not found in realm", tSchema.String) + } + t := &schema.Table{Name: name.String} + s.AddTables(t) + if sqlx.ValidString(comment) { + t.SetComment(comment.String) + } + if sqlx.ValidString(partattrs) { + t.AddAttrs(&Partition{ + start: partstart.String, + attrs: partattrs.String, + exprs: partexprs.String, + }) + } + } + return rows.Close() +} + +// columns queries and appends the columns of the given table. 
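+// Enum columns are scanned into an intermediate enumType first and resolved to + // schema.EnumType values in a follow-up query (see enumValues).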
+func (i *inspect) columns(ctx context.Context, s *schema.Schema) error { + query := columnsQuery + if i.crdb { + query = crdbColumnsQuery + } + rows, err := i.querySchema(ctx, query, s) + if err != nil { + return fmt.Errorf("postgres: querying schema %q columns: %w", s.Name, err) + } + defer rows.Close() + for rows.Next() { + if err := i.addColumn(s, rows); err != nil { + return fmt.Errorf("postgres: %w", err) + } + } + if err := rows.Close(); err != nil { + return err + } + return i.enumValues(ctx, s) +} + +// addColumn scans the current row and adds a new column from it to the table. +func (i *inspect) addColumn(s *schema.Schema, rows *sql.Rows) (err error) { + var ( + typid, typelem, maxlen, precision, timeprecision, scale, seqstart, seqinc, seqlast sql.NullInt64 + table, name, typ, fmtype, nullable, defaults, identity, genidentity, genexpr, charset, collate, comment, typtype, elemtyp, interval sql.NullString + ) + if err = rows.Scan( + &table, &name, &typ, &fmtype, &nullable, &defaults, &maxlen, &precision, &timeprecision, &scale, &interval, &charset, + &collate, &identity, &seqstart, &seqinc, &seqlast, &genidentity, &genexpr, &comment, &typtype, &typelem, &elemtyp, &typid, + ); err != nil { + return err + } + t, ok := s.Table(table.String) + if !ok { + return fmt.Errorf("table %q was not found in schema", table.String) + } + c := &schema.Column{ + Name: name.String, + Type: &schema.ColumnType{ + Raw: typ.String, + Null: nullable.String == "YES", + }, + } + c.Type.Type, err = columnType(&columnDesc{ + typ: typ.String, + fmtype: fmtype.String, + size: maxlen.Int64, + scale: scale.Int64, + typtype: typtype.String, + typelem: typelem.Int64, + elemtyp: elemtyp.String, + typid: typid.Int64, + interval: interval.String, + precision: precision.Int64, + timePrecision: &timeprecision.Int64, + }) + if defaults.Valid { + defaultExpr(c, defaults.String) + } + if identity.String == "YES" { + c.Attrs = append(c.Attrs, &Identity{ + Generation: genidentity.String, + Sequence: &Sequence{ + Last: seqlast.Int64, + Start: seqstart.Int64, + Increment: seqinc.Int64, + }, + }) + } + if sqlx.ValidString(genexpr) { + c.Attrs = append(c.Attrs, &schema.GeneratedExpr{ + Expr: genexpr.String, + }) + } + if sqlx.ValidString(comment) { + c.SetComment(comment.String) + } + if sqlx.ValidString(charset) { + c.SetCharset(charset.String) + } + if sqlx.ValidString(collate) { + c.SetCollation(collate.String) + } + t.Columns = append(t.Columns, c) + return nil +} + +// enumValues fills enum columns with their values from the database. +func (i *inspect) enumValues(ctx context.Context, s *schema.Schema) error { + var ( + args []any + ids = make(map[int64][]*schema.EnumType) + query = "SELECT enumtypid, enumlabel FROM pg_enum WHERE enumtypid IN (%s)" + newE = func(e1 *enumType) *schema.EnumType { + if _, ok := ids[e1.ID]; !ok { + args = append(args, e1.ID) + } + // Convert the intermediate type to + // the standard schema.EnumType. + e2 := &schema.EnumType{T: e1.T, Schema: s} + if e1.Schema != "" && e1.Schema != s.Name { + e2.Schema = schema.New(e1.Schema) + } + ids[e1.ID] = append(ids[e1.ID], e2) + return e2 + } + ) + for _, t := range s.Tables { + for _, c := range t.Columns { + switch t := c.Type.Type.(type) { + case *enumType: + e := newE(t) + c.Type.Type = e + c.Type.Raw = e.T + case *ArrayType: + if e, ok := t.Type.(*enumType); ok { + t.Type = newE(e) + } + } + } + } + if len(ids) == 0 { + return nil + } + rows, err := i.QueryContext(ctx, fmt.Sprintf(query, nArgs(0, len(args))), args...) 
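+	// A single round-trip fetches the labels of all enum types collected above.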
+ if err != nil { + return fmt.Errorf("postgres: querying enum values: %w", err) + } + defer rows.Close() + for rows.Next() { + var ( + id int64 + v string + ) + if err := rows.Scan(&id, &v); err != nil { + return fmt.Errorf("postgres: scanning enum label: %w", err) + } + for _, enum := range ids[id] { + enum.Values = append(enum.Values, v) + } + } + return nil +} + +// indexes queries and appends the indexes of the given table. +func (i *inspect) indexes(ctx context.Context, s *schema.Schema) error { + query := indexesQuery + switch { + case i.conn.crdb: + return i.crdbIndexes(ctx, s) + case !i.conn.supportsIndexInclude(): + query = indexesQueryNoInclude + } + rows, err := i.querySchema(ctx, query, s) + if err != nil { + return fmt.Errorf("postgres: querying schema %q indexes: %w", s.Name, err) + } + defer rows.Close() + if err := i.addIndexes(s, rows); err != nil { + return err + } + return rows.Err() +} + +// addIndexes scans the rows and adds the indexes to the table. +func (i *inspect) addIndexes(s *schema.Schema, rows *sql.Rows) error { + names := make(map[string]*schema.Index) + for rows.Next() { + var ( + uniq, primary, included bool + table, name, typ string + desc, nullsfirst, nullslast, opcdefault sql.NullBool + column, constraints, pred, expr, comment, options, opcname, opcparams sql.NullString + ) + if err := rows.Scan( + &table, &name, &typ, &column, &included, &primary, &uniq, &constraints, &pred, &expr, + &desc, &nullsfirst, &nullslast, &comment, &options, &opcname, &opcdefault, &opcparams, + ); err != nil { + return fmt.Errorf("postgres: scanning indexes for schema %q: %w", s.Name, err) + } + t, ok := s.Table(table) + if !ok { + return fmt.Errorf("table %q was not found in schema", table) + } + idx, ok := names[name] + if !ok { + idx = &schema.Index{ + Name: name, + Unique: uniq, + Table: t, + Attrs: []schema.Attr{ + &IndexType{T: typ}, + }, + } + if sqlx.ValidString(comment) { + idx.Attrs = append(idx.Attrs, &schema.Comment{Text: comment.String}) + } + if sqlx.ValidString(constraints) { + var m map[string]string + if err := json.Unmarshal([]byte(constraints.String), &m); err != nil { + return fmt.Errorf("postgres: unmarshaling index constraints: %w", err) + } + for n, t := range m { + idx.Attrs = append(idx.Attrs, &Constraint{N: n, T: t}) + } + } + if sqlx.ValidString(pred) { + idx.Attrs = append(idx.Attrs, &IndexPredicate{P: pred.String}) + } + if sqlx.ValidString(options) { + p, err := newIndexStorage(options.String) + if err != nil { + return err + } + idx.Attrs = append(idx.Attrs, p) + } + names[name] = idx + if primary { + t.PrimaryKey = idx + } else { + t.Indexes = append(t.Indexes, idx) + } + } + part := &schema.IndexPart{SeqNo: len(idx.Parts) + 1, Desc: desc.Bool} + if nullsfirst.Bool || nullslast.Bool { + part.Attrs = append(part.Attrs, &IndexColumnProperty{ + NullsFirst: nullsfirst.Bool, + NullsLast: nullslast.Bool, + }) + } + switch { + case included: + c, ok := t.Column(column.String) + if !ok { + return fmt.Errorf("postgres: INCLUDE column %q was not found for index %q", column.String, idx.Name) + } + var include IndexInclude + sqlx.Has(idx.Attrs, &include) + include.Columns = append(include.Columns, c) + schema.ReplaceOrAppend(&idx.Attrs, &include) + case sqlx.ValidString(column): + part.C, ok = t.Column(column.String) + if !ok { + return fmt.Errorf("postgres: column %q was not found for index %q", column.String, idx.Name) + } + part.C.Indexes = append(part.C.Indexes, idx) + idx.Parts = append(idx.Parts, part) + case sqlx.ValidString(expr): + part.X = 
&schema.RawExpr{ + X: expr.String, + } + idx.Parts = append(idx.Parts, part) + default: + return fmt.Errorf("postgres: invalid part for index %q", idx.Name) + } + if err := mayAppendOps(part, opcname.String, opcparams.String, opcdefault.Bool); err != nil { + return err + } + } + return nil +} + +// mayAppendOps appends an operator_class attribute to the part in case it is not the default. +func mayAppendOps(part *schema.IndexPart, name string, params string, defaults bool) error { + if name == "" || defaults && params == "" { + return nil + } + op := &IndexOpClass{Name: name, Default: defaults} + if err := op.parseParams(params); err != nil { + return err + } + part.Attrs = append(part.Attrs, op) + return nil +} + +// partitions builds the partition of each table in the schema. +func (i *inspect) partitions(s *schema.Schema) error { + for _, t := range s.Tables { + var d Partition + if !sqlx.Has(t.Attrs, &d) { + continue + } + switch s := strings.ToLower(d.start); s { + case "r": + d.T = PartitionTypeRange + case "l": + d.T = PartitionTypeList + case "h": + d.T = PartitionTypeHash + default: + return fmt.Errorf("postgres: unexpected partition strategy %q", s) + } + idxs := strings.Split(strings.TrimSpace(d.attrs), " ") + if len(idxs) == 0 { + return fmt.Errorf("postgres: no columns/expressions were found in partition key of table %q", t.Name) + } + for i := range idxs { + switch idx, err := strconv.Atoi(idxs[i]); { + case err != nil: + return fmt.Errorf("postgres: failed parsing partition key index %q", idxs[i]) + // An expression. + case idx == 0: + j := sqlx.ExprLastIndex(d.exprs) + if j == -1 { + return fmt.Errorf("postgres: no expression found in partition key: %q", d.exprs) + } + d.Parts = append(d.Parts, &PartitionPart{ + X: &schema.RawExpr{X: d.exprs[:j+1]}, + }) + d.exprs = strings.TrimPrefix(d.exprs[j+1:], ", ") + // A column at index idx-1. + default: + if idx > len(t.Columns) { + return fmt.Errorf("postgres: unexpected column index %d", idx) + } + d.Parts = append(d.Parts, &PartitionPart{ + C: t.Columns[idx-1], + }) + } + } + schema.ReplaceOrAppend(&t.Attrs, &d) + } + return nil +} + +// fks queries and appends the foreign keys of the given table. +func (i *inspect) fks(ctx context.Context, s *schema.Schema) error { + rows, err := i.querySchema(ctx, fksQuery, s) + if err != nil { + return fmt.Errorf("postgres: querying schema %q foreign keys: %w", s.Name, err) + } + defer rows.Close() + if err := sqlx.SchemaFKs(s, rows); err != nil { + return fmt.Errorf("postgres: %w", err) + } + return rows.Err() +} + +// checks queries and appends the check constraints of the given table. +func (i *inspect) checks(ctx context.Context, s *schema.Schema) error { + rows, err := i.querySchema(ctx, checksQuery, s) + if err != nil { + return fmt.Errorf("postgres: querying schema %q check constraints: %w", s.Name, err) + } + defer rows.Close() + if err := i.addChecks(s, rows); err != nil { + return err + } + return rows.Err() +} + +// addChecks scans the rows and adds the checks to the table.
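+// A CHECK constraint covering multiple columns is returned as one row per column; + // rows are merged by constraint name and the columns are collected in CheckColumns.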
+func (i *inspect) addChecks(s *schema.Schema, rows *sql.Rows) error { + names := make(map[string]*schema.Check) + for rows.Next() { + var ( + noInherit bool + table, name, column, clause, indexes string + ) + if err := rows.Scan(&table, &name, &clause, &column, &indexes, &noInherit); err != nil { + return fmt.Errorf("postgres: scanning check: %w", err) + } + t, ok := s.Table(table) + if !ok { + return fmt.Errorf("table %q was not found in schema", table) + } + if _, ok := t.Column(column); !ok { + return fmt.Errorf("postgres: column %q was not found for check %q", column, name) + } + check, ok := names[name] + if !ok { + check = &schema.Check{Name: name, Expr: clause, Attrs: []schema.Attr{&CheckColumns{}}} + if noInherit { + check.Attrs = append(check.Attrs, &NoInherit{}) + } + names[name] = check + t.Attrs = append(t.Attrs, check) + } + c := check.Attrs[0].(*CheckColumns) + c.Columns = append(c.Columns, column) + } + return nil +} + +// schemas returns the list of the schemas in the database. +func (i *inspect) schemas(ctx context.Context, opts *schema.InspectRealmOption) ([]*schema.Schema, error) { + var ( + args []any + query = schemasQuery + ) + if opts != nil { + switch n := len(opts.Schemas); { + case n == 1 && opts.Schemas[0] == "": + query = fmt.Sprintf(schemasQueryArgs, "= CURRENT_SCHEMA()") + case n == 1 && opts.Schemas[0] != "": + query = fmt.Sprintf(schemasQueryArgs, "= $1") + args = append(args, opts.Schemas[0]) + case n > 0: + query = fmt.Sprintf(schemasQueryArgs, "IN ("+nArgs(0, len(opts.Schemas))+")") + for _, s := range opts.Schemas { + args = append(args, s) + } + } + } + rows, err := i.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("postgres: querying schemas: %w", err) + } + defer rows.Close() + var schemas []*schema.Schema + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + schemas = append(schemas, &schema.Schema{ + Name: name, + }) + } + if err := rows.Close(); err != nil { + return nil, err + } + return schemas, nil +} + +func (i *inspect) querySchema(ctx context.Context, query string, s *schema.Schema) (*sql.Rows, error) { + args := []any{s.Name} + for _, t := range s.Tables { + args = append(args, t.Name) + } + return i.QueryContext(ctx, fmt.Sprintf(query, nArgs(1, len(s.Tables))), args...) +} + +func nArgs(start, n int) string { + var b strings.Builder + for i := 1; i <= n; i++ { + if i > 1 { + b.WriteString(", ") + } + b.WriteByte('$') + b.WriteString(strconv.Itoa(start + i)) + } + return b.String() +} + +var reNextval = regexp.MustCompile(`(?i) *nextval\('(?:")?(?:[\w$]+\.)*([\w$]+_[\w$]+_seq)(?:")?'(?:::regclass)*\) *$`) + +func defaultExpr(c *schema.Column, s string) { + switch m := reNextval.FindStringSubmatch(s); { + // The definition of "<column> <serial type>" is equivalent to specifying: + // "<column> <int type> NOT NULL DEFAULT nextval('<table>_<column>_seq')".
+ // https://postgresql.org/docs/current/datatype-numeric.html#DATATYPE-SERIAL. + case len(m) == 2: + tt, ok := c.Type.Type.(*schema.IntegerType) + if !ok { + return + } + st := &SerialType{SequenceName: m[1]} + st.SetType(tt) + c.Type.Raw = st.T + c.Type.Type = st + case sqlx.IsLiteralBool(s), sqlx.IsLiteralNumber(s), sqlx.IsQuoted(s, '\''): + c.Default = &schema.Literal{V: s} + default: + var x schema.Expr = &schema.RawExpr{X: s} + // Try casting or fallback to raw expressions (e.g. column text[] has the default of '{}'::text[]). + if v, ok := canConvert(c.Type, s); ok { + x = &schema.Literal{V: v} + } + c.Default = x + } +} + +func canConvert(t *schema.ColumnType, x string) (string, bool) { + i := strings.LastIndex(x, "::") + if i == -1 || !sqlx.IsQuoted(x[:i], '\'') { + return "", false + } + q := x[0:i] + x = x[1 : i-1] + switch t.Type.(type) { + case *enumType: + return q, true + case *schema.BoolType: + if sqlx.IsLiteralBool(x) { + return x, true + } + case *schema.DecimalType, *schema.IntegerType, *schema.FloatType: + if sqlx.IsLiteralNumber(x) { + return x, true + } + case *ArrayType, *schema.BinaryType, *schema.JSONType, *NetworkType, *schema.SpatialType, *schema.StringType, *schema.TimeType, *schema.UUIDType, *XMLType: + return q, true + } + return "", false +} + +type ( + // CType describes the character classification setting (LC_CTYPE). + CType struct { + schema.Attr + V string + } + + // UserDefinedType defines a user-defined type attribute. + UserDefinedType struct { + schema.Type + T string + } + + // enumType represents an enum type. It serves as an intermediate representation of a Postgres enum type, + // to temporarily save TypeID and TypeName of an enum column until the enum values can be extracted. + enumType struct { + schema.Type + T string // Type name. + Schema string // Optional schema name. + ID int64 // Type id. + Values []string + } + + // ArrayType defines an array type. + // https://postgresql.org/docs/current/arrays.html + ArrayType struct { + schema.Type // Underlying items type (e.g. varchar(255)). + T string // Formatted type (e.g. int[]). + } + + // BitType defines a bit type. + // https://postgresql.org/docs/current/datatype-bit.html + BitType struct { + schema.Type + T string + Len int64 + } + + // IntervalType defines an interval type. + // https://postgresql.org/docs/current/datatype-datetime.html + IntervalType struct { + schema.Type + T string // Type name. + F string // Optional field. YEAR, MONTH, ..., MINUTE TO SECOND. + Precision *int // Optional precision. + } + + // A NetworkType defines a network type. + // https://postgresql.org/docs/current/datatype-net-types.html + NetworkType struct { + schema.Type + T string + Len int64 + } + + // A CurrencyType defines a currency type. + CurrencyType struct { + schema.Type + T string + } + + // A RangeType defines a range type. + // https://www.postgresql.org/docs/current/rangetypes.html + RangeType struct { + schema.Type + T string + } + + // A SerialType defines a serial type. + // https://postgresql.org/docs/current/datatype-numeric.html#DATATYPE-SERIAL + SerialType struct { + schema.Type + T string + Precision int + // SequenceName holds the inspected sequence name attached to the column. + // It defaults to <table>_<column>_seq when the column is created, but may
+ // be different in case the table or the column was renamed. + SequenceName string + } + + // A TextSearchType defines full text search types. + // https://www.postgresql.org/docs/current/datatype-textsearch.html + TextSearchType struct { + schema.Type + T string + } + + // UUIDType is alias to schema.UUIDType. + // Defined here for backward compatibility reasons. + UUIDType = schema.UUIDType + + // OIDType defines an object identifier type. + OIDType struct { + schema.Type + T string + } + + // An XMLType defines an XML type. + XMLType struct { + schema.Type + T string + } + + // Constraint describes a postgres constraint. + // https://postgresql.org/docs/current/catalog-pg-constraint.html + Constraint struct { + schema.Attr + N string // constraint name + T string // c, f, p, u, t, x. + } + + // Sequence defines (the supported) sequence options. + // https://postgresql.org/docs/current/sql-createsequence.html + Sequence struct { + Start, Increment int64 + // Last sequence value written to disk. + // https://postgresql.org/docs/current/view-pg-sequences.html. + Last int64 + } + + // Identity defines an identity column. + Identity struct { + schema.Attr + Generation string // ALWAYS, BY DEFAULT. + Sequence *Sequence + } + + // IndexType represents an index type. + // https://postgresql.org/docs/current/indexes-types.html + IndexType struct { + schema.Attr + T string // BTREE, BRIN, HASH, GiST, SP-GiST, GIN. + } + + // IndexPredicate describes a partial index predicate. + // https://postgresql.org/docs/current/catalog-pg-index.html + IndexPredicate struct { + schema.Attr + P string + } + + // IndexColumnProperty describes an index column property. + // https://postgresql.org/docs/current/functions-info.html#FUNCTIONS-INFO-INDEX-COLUMN-PROPS + IndexColumnProperty struct { + schema.Attr + // NullsFirst defaults to true for DESC indexes. + NullsFirst bool + // NullsLast defaults to true for ASC indexes. + NullsLast bool + } + + // IndexStorageParams describes index storage parameters added with the WITH clause. + // https://postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-STORAGE-PARAMETERS + IndexStorageParams struct { + schema.Attr + // AutoSummarize defines the autosummarize storage parameter. + AutoSummarize bool + // PagesPerRange defines pages_per_range storage + // parameter for BRIN indexes. Defaults to 128. + PagesPerRange int64 + } + + // IndexInclude describes the INCLUDE clause, which allows specifying + // a list of columns added to the index as non-key columns. + // https://www.postgresql.org/docs/current/sql-createindex.html + IndexInclude struct { + schema.Attr + Columns []*schema.Column + } + + // IndexOpClass describes the operator class of the index part. + // https://www.postgresql.org/docs/current/indexes-opclass.html. + IndexOpClass struct { + schema.Attr + Name string // Name of the operator class. + Default bool // If it is the default operator class. + Params []struct{ N, V string } // Optional parameters. + } + + // Concurrently describes the CONCURRENTLY clause to instruct Postgres to + // build or drop the index concurrently without blocking the current table. + // https://www.postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY + Concurrently struct { + schema.Clause + } + + // NoInherit attribute defines the NO INHERIT flag for CHECK constraint.
+ // https://postgresql.org/docs/current/catalog-pg-constraint.html + NoInherit struct { + schema.Attr + } + + // CheckColumns attribute holds the column names used by the CHECK constraints. + // This attribute is added on inspection for internal usage and has no meaning + // on migration. + CheckColumns struct { + schema.Attr + Columns []string + } + + // Partition defines the spec of a partitioned table. + Partition struct { + schema.Attr + // T defines the type/strategy of the partition. + // Can be one of: RANGE, LIST, HASH. + T string + // Partition parts. The additional attributes + // on each part can be used to control collation. + Parts []*PartitionPart + + // Internal info returned from pg_partitioned_table. + start, attrs, exprs string + } + + // A PartitionPart represents a partition part that + // can be either an expression or a column. + PartitionPart struct { + X schema.Expr + C *schema.Column + Attrs []schema.Attr + } + + // Cascade describes that a CASCADE clause should be added to the DROP [TABLE|SCHEMA] + // operation. Note, this clause is automatically added to DROP SCHEMA by the planner. + Cascade struct { + schema.Clause + } +) + +// IsUnique reports if the constraint is a unique constraint. +func (c Constraint) IsUnique() bool { return strings.ToLower(c.T) == "u" } + +// IntegerType returns the underlying integer type this serial type represents. +func (s *SerialType) IntegerType() *schema.IntegerType { + t := &schema.IntegerType{T: TypeInteger} + switch s.T { + case TypeSerial2, TypeSmallSerial: + t.T = TypeSmallInt + case TypeSerial8, TypeBigSerial: + t.T = TypeBigInt + } + return t +} + +// SetType sets the serial type from the given integer type. +func (s *SerialType) SetType(t *schema.IntegerType) { + switch t.T { + case TypeSmallInt, TypeInt2: + s.T = TypeSmallSerial + case TypeInteger, TypeInt4, TypeInt: + s.T = TypeSerial + case TypeBigInt, TypeInt8: + s.T = TypeBigSerial + } +} + +// sequence returns the inspected name of the sequence +// or the standard name defined by postgres. +func (s *SerialType) sequence(t *schema.Table, c *schema.Column) string { + if s.SequenceName != "" { + return s.SequenceName + } + return fmt.Sprintf("%s_%s_seq", t.Name, c.Name) +} + +var ( + opsOnce sync.Once + defaultOps map[postgresop.Class]bool +) + +// DefaultFor reports if the operator_class is the default for the index part. +func (o *IndexOpClass) DefaultFor(idx *schema.Index, part *schema.IndexPart) (bool, error) { + // Explicitly defined as the default (usually, it comes from the inspection). + if o.Default && len(o.Params) == 0 { + return true, nil + } + it := &IndexType{T: IndexTypeBTree} + if sqlx.Has(idx.Attrs, it) { + it.T = strings.ToUpper(it.T) + } + // The key type must be known to check if it is the default op_class.
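+	// Expression parts and op classes carrying explicit parameters are never reported + // as defaults here.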
+ if part.X != nil || len(o.Params) > 0 { + return false, nil + } + opsOnce.Do(func() { + defaultOps = make(map[postgresop.Class]bool, len(postgresop.Classes)) + for _, op := range postgresop.Classes { + if op.Default { + defaultOps[postgresop.Class{Name: op.Name, Method: op.Method, Type: op.Type}] = true + } + } + }) + var ( + t string + err error + ) + switch typ := part.C.Type.Type.(type) { + case *schema.EnumType: + t = "anyenum" + case *ArrayType: + t = "anyarray" + default: + t, err = FormatType(typ) + if err != nil { + return false, fmt.Errorf("postgres: format operator-class type %T: %w", typ, err) + } + } + return defaultOps[postgresop.Class{Name: o.Name, Method: it.T, Type: t}], nil +} + +// Equal reports whether o and x are the same operator class. +func (o *IndexOpClass) Equal(x *IndexOpClass) bool { + if o.Name != x.Name || o.Default != x.Default || len(o.Params) != len(x.Params) { + return false + } + for i := range o.Params { + if o.Params[i].N != x.Params[i].N || o.Params[i].V != x.Params[i].V { + return false + } + } + return true +} + +// String returns the string representation of the operator class. +func (o *IndexOpClass) String() string { + if len(o.Params) == 0 { + return o.Name + } + var b strings.Builder + b.WriteString(o.Name) + b.WriteString("(") + for i, p := range o.Params { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(p.N) + b.WriteString("=") + b.WriteString(p.V) + } + b.WriteString(")") + return b.String() +} + +// UnmarshalText parses the operator class from its string representation. +func (o *IndexOpClass) UnmarshalText(text []byte) error { + i := bytes.IndexByte(text, '(') + if i == -1 { + o.Name = string(text) + return nil + } + o.Name = string(text[:i]) + return o.parseParams(string(text[i:])) +} + +// parseParams parses index class parameters defined in HCL or returned +// from the database. For example: '{k=v}', '(k1=v1,k2=v2)'. +func (o *IndexOpClass) parseParams(kv string) error { + switch { + case kv == "": + case strings.HasPrefix(kv, "(") && strings.HasSuffix(kv, ")"), strings.HasPrefix(kv, "{") && strings.HasSuffix(kv, "}"): + for _, e := range strings.Split(kv[1:len(kv)-1], ",") { + if kv := strings.Split(strings.TrimSpace(e), "="); len(kv) == 2 { + o.Params = append(o.Params, struct{ N, V string }{N: kv[0], V: kv[1]}) + } + } + default: + return fmt.Errorf("postgres: unexpected operator class parameters format: %q", kv) + } + return nil +} + +// newIndexStorage parses and returns the index storage parameters. +func newIndexStorage(opts string) (*IndexStorageParams, error) { + params := &IndexStorageParams{} + for _, p := range strings.Split(strings.Trim(opts, "{}"), ",") { + kv := strings.Split(p, "=") + if len(kv) != 2 { + return nil, fmt.Errorf("invalid index storage parameter: %s", p) + } + switch kv[0] { + case "autosummarize": + b, err := strconv.ParseBool(kv[1]) + if err != nil { + return nil, fmt.Errorf("failed parsing autosummarize %q: %w", kv[1], err) + } + params.AutoSummarize = b + case "pages_per_range": + i, err := strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("failed parsing pages_per_range %q: %w", kv[1], err) + } + params.PagesPerRange = i + } + } + return params, nil +} + +// reEnumType extracts the enum type and an option schema qualifier. 
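+// For example (illustrative): 'public."my type"' is split into the schema qualifier + // "public" and the type name "my type", with quoting unescaped by newEnumType.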
+var reEnumType = regexp.MustCompile(`^(?:(".+"|\w+)\.)?(".+"|\w+)$`) + +func newEnumType(t string, id int64) *enumType { + var ( + e = &enumType{T: t, ID: id} + parts = reEnumType.FindStringSubmatch(e.T) + r = func(s string) string { + s = strings.ReplaceAll(s, `""`, `"`) + if len(s) > 1 && s[0] == '"' && s[len(s)-1] == '"' { + s = s[1 : len(s)-1] + } + return s + } + ) + if len(parts) > 1 { + e.Schema = r(parts[1]) + } + if len(parts) > 2 { + e.T = r(parts[2]) + } + return e +} + +const ( + // Query to list runtime parameters. + paramsQuery = `SELECT setting FROM pg_settings WHERE name IN ('lc_collate', 'lc_ctype', 'server_version_num', 'crdb_version') ORDER BY name DESC` + + // Query to list database schemas. + schemasQuery = "SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('information_schema', 'pg_catalog', 'pg_toast', 'crdb_internal', 'pg_extension') AND schema_name NOT LIKE 'pg_%temp_%' ORDER BY schema_name" + + // Query to list specific database schemas. + schemasQueryArgs = "SELECT schema_name FROM information_schema.schemata WHERE schema_name %s ORDER BY schema_name" + + // Query to list table information. + tablesQuery = ` +SELECT + t1.table_schema, + t1.table_name, + pg_catalog.obj_description(t3.oid, 'pg_class') AS comment, + t4.partattrs AS partition_attrs, + t4.partstrat AS partition_strategy, + pg_get_expr(t4.partexprs, t4.partrelid) AS partition_exprs +FROM + INFORMATION_SCHEMA.TABLES AS t1 + JOIN pg_catalog.pg_namespace AS t2 ON t2.nspname = t1.table_schema + JOIN pg_catalog.pg_class AS t3 ON t3.relnamespace = t2.oid AND t3.relname = t1.table_name + LEFT JOIN pg_catalog.pg_partitioned_table AS t4 ON t4.partrelid = t3.oid +WHERE + t1.table_type = 'BASE TABLE' + AND NOT COALESCE(t3.relispartition, false) + AND t1.table_schema IN (%s) +ORDER BY + t1.table_schema, t1.table_name +` + tablesQueryArgs = ` +SELECT + t1.table_schema, + t1.table_name, + pg_catalog.obj_description(t3.oid, 'pg_class') AS comment, + t4.partattrs AS partition_attrs, + t4.partstrat AS partition_strategy, + pg_get_expr(t4.partexprs, t4.partrelid) AS partition_exprs +FROM + INFORMATION_SCHEMA.TABLES AS t1 + JOIN pg_catalog.pg_namespace AS t2 ON t2.nspname = t1.table_schema + JOIN pg_catalog.pg_class AS t3 ON t3.relnamespace = t2.oid AND t3.relname = t1.table_name + LEFT JOIN pg_catalog.pg_partitioned_table AS t4 ON t4.partrelid = t3.oid +WHERE + t1.table_type = 'BASE TABLE' + AND NOT COALESCE(t3.relispartition, false) + AND t1.table_schema IN (%s) + AND t1.table_name IN (%s) +ORDER BY + t1.table_schema, t1.table_name +` + // Query to list table columns. + columnsQuery = ` +SELECT + t1.table_name, + t1.column_name, + t1.data_type, + pg_catalog.format_type(a.atttypid, a.atttypmod) AS format_type, + t1.is_nullable, + t1.column_default, + t1.character_maximum_length, + t1.numeric_precision, + t1.datetime_precision, + t1.numeric_scale, + t1.interval_type, + t1.character_set_name, + t1.collation_name, + t1.is_identity, + t1.identity_start, + t1.identity_increment, + (CASE WHEN t1.is_identity = 'YES' THEN (SELECT last_value FROM pg_sequences WHERE quote_ident(schemaname) || '.' || quote_ident(sequencename) = pg_get_serial_sequence(quote_ident(t1.table_schema) || '.' 
|| quote_ident(t1.table_name), t1.column_name)) END) AS identity_last, + t1.identity_generation, + t1.generation_expression, + col_description(t3.oid, "ordinal_position") AS comment, + t4.typtype, + t4.typelem, + (CASE WHEN t4.typcategory = 'A' AND t4.typelem <> 0 THEN (SELECT t.typtype FROM pg_catalog.pg_type t WHERE t.oid = t4.typelem) END) AS elemtyp, + t4.oid +FROM + "information_schema"."columns" AS t1 + JOIN pg_catalog.pg_namespace AS t2 ON t2.nspname = t1.table_schema + JOIN pg_catalog.pg_class AS t3 ON t3.relnamespace = t2.oid AND t3.relname = t1.table_name + JOIN pg_catalog.pg_attribute AS a ON a.attrelid = t3.oid AND a.attname = t1.column_name + LEFT JOIN pg_catalog.pg_type AS t4 ON t4.oid = a.atttypid +WHERE + t1.table_schema = $1 AND t1.table_name IN (%s) +ORDER BY + t1.table_name, t1.ordinal_position +` + + fksQuery = ` +SELECT + fk.constraint_name, + fk.table_name, + a1.attname AS column_name, + fk.schema_name, + fk.referenced_table_name, + a2.attname AS referenced_column_name, + fk.referenced_schema_name, + rc.update_rule, + rc.delete_rule + FROM + ( + SELECT + con.conname AS constraint_name, + con.conrelid, + con.confrelid, + t1.relname AS table_name, + ns1.nspname AS schema_name, + t2.relname AS referenced_table_name, + ns2.nspname AS referenced_schema_name, + unnest(con.conkey) AS conkey, + unnest(con.confkey) AS confkey + FROM pg_constraint con + JOIN pg_class t1 ON t1.oid = con.conrelid + JOIN pg_class t2 ON t2.oid = con.confrelid + JOIN pg_namespace ns1 on t1.relnamespace = ns1.oid + JOIN pg_namespace ns2 on t2.relnamespace = ns2.oid + WHERE ns1.nspname = $1 + AND t1.relname IN (%s) + AND con.contype = 'f' + ) AS fk + JOIN pg_attribute a1 ON a1.attnum = fk.conkey AND a1.attrelid = fk.conrelid + JOIN pg_attribute a2 ON a2.attnum = fk.confkey AND a2.attrelid = fk.confrelid + JOIN information_schema.referential_constraints rc ON rc.constraint_name = fk.constraint_name AND rc.constraint_schema = fk.schema_name + ORDER BY + fk.conrelid, fk.constraint_name +` + + // Query to list table check constraints. 
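+	// One row is returned per (constraint, column) pair; see addChecks for the merge logic.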
+ checksQuery = ` +SELECT + rel.relname AS table_name, + t1.conname AS constraint_name, + pg_get_expr(t1.conbin, t1.conrelid) as expression, + t2.attname as column_name, + t1.conkey as column_indexes, + t1.connoinherit as no_inherit +FROM + pg_constraint t1 + JOIN pg_attribute t2 + ON t2.attrelid = t1.conrelid + AND t2.attnum = ANY (t1.conkey) + JOIN pg_class rel + ON rel.oid = t1.conrelid + JOIN pg_namespace nsp + ON nsp.oid = t1.connamespace +WHERE + t1.contype = 'c' + AND nsp.nspname = $1 + AND rel.relname IN (%s) +ORDER BY + t1.conname, array_position(t1.conkey, t2.attnum) +` +) + +var ( + indexesQuery = fmt.Sprintf(indexesQueryTmpl, "(a.attname <> '' AND idx.indnatts > idx.indnkeyatts AND idx.ord > idx.indnkeyatts)", "%s") + indexesQueryNoInclude = fmt.Sprintf(indexesQueryTmpl, "false", "%s") + indexesQueryTmpl = ` +SELECT + t.relname AS table_name, + i.relname AS index_name, + am.amname AS index_type, + a.attname AS column_name, + %s AS included, + idx.indisprimary AS primary, + idx.indisunique AS unique, + con.nametypes AS constraints, + pg_get_expr(idx.indpred, idx.indrelid) AS predicate, + pg_get_indexdef(idx.indexrelid, idx.ord, false) AS expression, + pg_index_column_has_property(idx.indexrelid, idx.ord, 'desc') AS isdesc, + pg_index_column_has_property(idx.indexrelid, idx.ord, 'nulls_first') AS nulls_first, + pg_index_column_has_property(idx.indexrelid, idx.ord, 'nulls_last') AS nulls_last, + obj_description(i.oid, 'pg_class') AS comment, + i.reloptions AS options, + op.opcname AS opclass_name, + op.opcdefault AS opclass_default, + a2.attoptions AS opclass_params +FROM + ( + select + *, + generate_series(1,array_length(i.indkey,1)) as ord, + unnest(i.indkey) AS key + from pg_index i + ) idx + JOIN pg_class i ON i.oid = idx.indexrelid + JOIN pg_class t ON t.oid = idx.indrelid + JOIN pg_namespace n ON n.oid = t.relnamespace + LEFT JOIN ( + select conindid, jsonb_object_agg(conname, contype) AS nametypes + from pg_constraint + group by conindid + ) con ON con.conindid = idx.indexrelid + LEFT JOIN pg_attribute a ON (a.attrelid, a.attnum) = (idx.indrelid, idx.key) + JOIN pg_am am ON am.oid = i.relam + LEFT JOIN pg_opclass op ON op.oid = idx.indclass[idx.ord-1] + LEFT JOIN pg_attribute a2 ON (a2.attrelid, a2.attnum) = (idx.indexrelid, idx.ord) +WHERE + n.nspname = $1 + AND t.relname IN (%s) +ORDER BY + table_name, index_name, idx.ord +` +) diff --git a/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/BUILD b/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/BUILD new file mode 100644 index 00000000..cd5a36ed --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/BUILD @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "postgresop", + srcs = ["postgresop.go"], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/postgres/internal/postgresop", + importpath = "ariga.io/atlas/sql/postgres/internal/postgresop", + visibility = [ + "//third_party:__subpackages__", + "//vendor/ariga.io/atlas/sql/postgres:__subpackages__", + ], +) diff --git a/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/postgresop.go b/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/postgresop.go new file mode 100644 index 00000000..3b58859b --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/internal/postgresop/postgresop.go @@ -0,0 +1,194 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgresop + +// Class describes an index operator class. +type Class struct { + Name string // operator class name + Method string // index method + Type string // indexed data type + Default bool // default to the type/method above +} + +// Classes defined in latest PostgreSQL version (15). +var Classes = []*Class{ + {Name: "bit_minmax_ops", Method: "BRIN", Type: "bit", Default: true}, + {Name: "box_inclusion_ops", Method: "BRIN", Type: "box", Default: true}, + {Name: "bpchar_bloom_ops", Method: "BRIN", Type: "character", Default: false}, + {Name: "bpchar_minmax_ops", Method: "BRIN", Type: "character", Default: true}, + {Name: "bytea_bloom_ops", Method: "BRIN", Type: "bytea", Default: false}, + {Name: "bytea_minmax_ops", Method: "BRIN", Type: "bytea", Default: true}, + {Name: "char_bloom_ops", Method: "BRIN", Type: "char", Default: false}, + {Name: "char_minmax_ops", Method: "BRIN", Type: "char", Default: true}, + {Name: "date_bloom_ops", Method: "BRIN", Type: "date", Default: false}, + {Name: "date_minmax_multi_ops", Method: "BRIN", Type: "date", Default: false}, + {Name: "date_minmax_ops", Method: "BRIN", Type: "date", Default: true}, + {Name: "float4_bloom_ops", Method: "BRIN", Type: "real", Default: false}, + {Name: "float4_minmax_multi_ops", Method: "BRIN", Type: "real", Default: false}, + {Name: "float4_minmax_ops", Method: "BRIN", Type: "real", Default: true}, + {Name: "float8_bloom_ops", Method: "BRIN", Type: "double precision", Default: false}, + {Name: "float8_minmax_multi_ops", Method: "BRIN", Type: "double precision", Default: false}, + {Name: "float8_minmax_ops", Method: "BRIN", Type: "double precision", Default: true}, + {Name: "inet_bloom_ops", Method: "BRIN", Type: "inet", Default: false}, + {Name: "inet_inclusion_ops", Method: "BRIN", Type: "inet", Default: true}, + {Name: "inet_minmax_multi_ops", Method: "BRIN", Type: "inet", Default: false}, + {Name: "inet_minmax_ops", Method: "BRIN", Type: "inet", Default: false}, + {Name: "int2_bloom_ops", Method: "BRIN", Type: "smallint", Default: false}, + {Name: "int2_minmax_multi_ops", Method: "BRIN", Type: "smallint", Default: false}, + {Name: "int2_minmax_ops", Method: "BRIN", Type: "smallint", Default: true}, + {Name: "int4_bloom_ops", Method: "BRIN", Type: "integer", Default: false}, + {Name: "int4_minmax_multi_ops", Method: "BRIN", Type: "integer", Default: false}, + {Name: "int4_minmax_ops", Method: "BRIN", Type: "integer", Default: true}, + {Name: "int8_bloom_ops", Method: "BRIN", Type: "bigint", Default: false}, + {Name: "int8_minmax_multi_ops", Method: "BRIN", Type: "bigint", Default: false}, + {Name: "int8_minmax_ops", Method: "BRIN", Type: "bigint", Default: true}, + {Name: "interval_bloom_ops", Method: "BRIN", Type: "interval", Default: false}, + {Name: "interval_minmax_multi_ops", Method: "BRIN", Type: "interval", Default: false}, + {Name: "interval_minmax_ops", Method: "BRIN", Type: "interval", Default: true}, + {Name: "macaddr8_bloom_ops", Method: "BRIN", Type: "macaddr8", Default: false}, + {Name: "macaddr8_minmax_multi_ops", Method: "BRIN", Type: "macaddr8", Default: false}, + {Name: "macaddr8_minmax_ops", Method: "BRIN", Type: "macaddr8", Default: true}, + {Name: "macaddr_bloom_ops", Method: "BRIN", Type: "macaddr", Default: false}, + {Name: "macaddr_minmax_multi_ops", Method: "BRIN", Type: "macaddr", Default: false}, + {Name: "macaddr_minmax_ops", Method: "BRIN", 
Type: "macaddr", Default: true}, + {Name: "name_bloom_ops", Method: "BRIN", Type: "name", Default: false}, + {Name: "name_minmax_ops", Method: "BRIN", Type: "name", Default: true}, + {Name: "numeric_bloom_ops", Method: "BRIN", Type: "numeric", Default: false}, + {Name: "numeric_minmax_multi_ops", Method: "BRIN", Type: "numeric", Default: false}, + {Name: "numeric_minmax_ops", Method: "BRIN", Type: "numeric", Default: true}, + {Name: "oid_bloom_ops", Method: "BRIN", Type: "oid", Default: false}, + {Name: "oid_minmax_multi_ops", Method: "BRIN", Type: "oid", Default: false}, + {Name: "oid_minmax_ops", Method: "BRIN", Type: "oid", Default: true}, + {Name: "pg_lsn_bloom_ops", Method: "BRIN", Type: "pg_lsn", Default: false}, + {Name: "pg_lsn_minmax_multi_ops", Method: "BRIN", Type: "pg_lsn", Default: false}, + {Name: "pg_lsn_minmax_ops", Method: "BRIN", Type: "pg_lsn", Default: true}, + {Name: "range_inclusion_ops", Method: "BRIN", Type: "anyrange", Default: true}, + {Name: "text_bloom_ops", Method: "BRIN", Type: "text", Default: false}, + {Name: "text_minmax_ops", Method: "BRIN", Type: "text", Default: true}, + {Name: "tid_bloom_ops", Method: "BRIN", Type: "tid", Default: false}, + {Name: "tid_minmax_multi_ops", Method: "BRIN", Type: "tid", Default: false}, + {Name: "tid_minmax_ops", Method: "BRIN", Type: "tid", Default: true}, + {Name: "time_bloom_ops", Method: "BRIN", Type: "time without time zone", Default: false}, + {Name: "time_minmax_multi_ops", Method: "BRIN", Type: "time without time zone", Default: false}, + {Name: "time_minmax_ops", Method: "BRIN", Type: "time without time zone", Default: true}, + {Name: "timestamp_bloom_ops", Method: "BRIN", Type: "timestamp without time zone", Default: false}, + {Name: "timestamp_minmax_multi_ops", Method: "BRIN", Type: "timestamp without time zone", Default: false}, + {Name: "timestamp_minmax_ops", Method: "BRIN", Type: "timestamp without time zone", Default: true}, + {Name: "timestamptz_bloom_ops", Method: "BRIN", Type: "timestamp with time zone", Default: false}, + {Name: "timestamptz_minmax_multi_ops", Method: "BRIN", Type: "timestamp with time zone", Default: false}, + {Name: "timestamptz_minmax_ops", Method: "BRIN", Type: "timestamp with time zone", Default: true}, + {Name: "timetz_bloom_ops", Method: "BRIN", Type: "time with time zone", Default: false}, + {Name: "timetz_minmax_multi_ops", Method: "BRIN", Type: "time with time zone", Default: false}, + {Name: "timetz_minmax_ops", Method: "BRIN", Type: "time with time zone", Default: true}, + {Name: "uuid_bloom_ops", Method: "BRIN", Type: "uuid", Default: false}, + {Name: "uuid_minmax_multi_ops", Method: "BRIN", Type: "uuid", Default: false}, + {Name: "uuid_minmax_ops", Method: "BRIN", Type: "uuid", Default: true}, + {Name: "varbit_minmax_ops", Method: "BRIN", Type: "bit varying", Default: true}, + {Name: "array_ops", Method: "BTREE", Type: "anyarray", Default: true}, + {Name: "bit_ops", Method: "BTREE", Type: "bit", Default: true}, + {Name: "bool_ops", Method: "BTREE", Type: "boolean", Default: true}, + {Name: "bpchar_ops", Method: "BTREE", Type: "character", Default: true}, + {Name: "bpchar_pattern_ops", Method: "BTREE", Type: "character", Default: false}, + {Name: "bytea_ops", Method: "BTREE", Type: "bytea", Default: true}, + {Name: "char_ops", Method: "BTREE", Type: "char", Default: true}, + {Name: "cidr_ops", Method: "BTREE", Type: "inet", Default: false}, + {Name: "date_ops", Method: "BTREE", Type: "date", Default: true}, + {Name: "enum_ops", Method: "BTREE", Type: "anyenum", Default: 
true}, + {Name: "float4_ops", Method: "BTREE", Type: "real", Default: true}, + {Name: "float8_ops", Method: "BTREE", Type: "double precision", Default: true}, + {Name: "inet_ops", Method: "BTREE", Type: "inet", Default: true}, + {Name: "int2_ops", Method: "BTREE", Type: "smallint", Default: true}, + {Name: "int4_ops", Method: "BTREE", Type: "integer", Default: true}, + {Name: "int8_ops", Method: "BTREE", Type: "bigint", Default: true}, + {Name: "interval_ops", Method: "BTREE", Type: "interval", Default: true}, + {Name: "jsonb_ops", Method: "BTREE", Type: "jsonb", Default: true}, + {Name: "macaddr8_ops", Method: "BTREE", Type: "macaddr8", Default: true}, + {Name: "macaddr_ops", Method: "BTREE", Type: "macaddr", Default: true}, + {Name: "money_ops", Method: "BTREE", Type: "money", Default: true}, + {Name: "multirange_ops", Method: "BTREE", Type: "anymultirange", Default: true}, + {Name: "name_ops", Method: "BTREE", Type: "name", Default: true}, + {Name: "numeric_ops", Method: "BTREE", Type: "numeric", Default: true}, + {Name: "oid_ops", Method: "BTREE", Type: "oid", Default: true}, + {Name: "oidvector_ops", Method: "BTREE", Type: "oidvector", Default: true}, + {Name: "pg_lsn_ops", Method: "BTREE", Type: "pg_lsn", Default: true}, + {Name: "range_ops", Method: "BTREE", Type: "anyrange", Default: true}, + {Name: "record_image_ops", Method: "BTREE", Type: "record", Default: false}, + {Name: "record_ops", Method: "BTREE", Type: "record", Default: true}, + {Name: "text_ops", Method: "BTREE", Type: "text", Default: true}, + {Name: "text_pattern_ops", Method: "BTREE", Type: "text", Default: false}, + {Name: "tid_ops", Method: "BTREE", Type: "tid", Default: true}, + {Name: "time_ops", Method: "BTREE", Type: "time without time zone", Default: true}, + {Name: "timestamp_ops", Method: "BTREE", Type: "timestamp without time zone", Default: true}, + {Name: "timestamptz_ops", Method: "BTREE", Type: "timestamp with time zone", Default: true}, + {Name: "timetz_ops", Method: "BTREE", Type: "time with time zone", Default: true}, + {Name: "tsquery_ops", Method: "BTREE", Type: "tsquery", Default: true}, + {Name: "tsvector_ops", Method: "BTREE", Type: "tsvector", Default: true}, + {Name: "uuid_ops", Method: "BTREE", Type: "uuid", Default: true}, + {Name: "varbit_ops", Method: "BTREE", Type: "bit varying", Default: true}, + {Name: "varchar_ops", Method: "BTREE", Type: "text", Default: false}, + {Name: "varchar_pattern_ops", Method: "BTREE", Type: "text", Default: false}, + {Name: "xid8_ops", Method: "BTREE", Type: "xid8", Default: true}, + {Name: "array_ops", Method: "GIN", Type: "anyarray", Default: true}, + {Name: "jsonb_ops", Method: "GIN", Type: "jsonb", Default: true}, + {Name: "jsonb_path_ops", Method: "GIN", Type: "jsonb", Default: false}, + {Name: "tsvector_ops", Method: "GIN", Type: "tsvector", Default: true}, + {Name: "box_ops", Method: "GIST", Type: "box", Default: true}, + {Name: "circle_ops", Method: "GIST", Type: "circle", Default: true}, + {Name: "inet_ops", Method: "GIST", Type: "inet", Default: false}, + {Name: "multirange_ops", Method: "GIST", Type: "anymultirange", Default: true}, + {Name: "point_ops", Method: "GIST", Type: "point", Default: true}, + {Name: "poly_ops", Method: "GIST", Type: "polygon", Default: true}, + {Name: "range_ops", Method: "GIST", Type: "anyrange", Default: true}, + {Name: "tsquery_ops", Method: "GIST", Type: "tsquery", Default: true}, + {Name: "tsvector_ops", Method: "GIST", Type: "tsvector", Default: true}, + {Name: "aclitem_ops", Method: "HASH", Type: "aclitem", 
Default: true}, + {Name: "array_ops", Method: "HASH", Type: "anyarray", Default: true}, + {Name: "bool_ops", Method: "HASH", Type: "boolean", Default: true}, + {Name: "bpchar_ops", Method: "HASH", Type: "character", Default: true}, + {Name: "bpchar_pattern_ops", Method: "HASH", Type: "character", Default: false}, + {Name: "bytea_ops", Method: "HASH", Type: "bytea", Default: true}, + {Name: "char_ops", Method: "HASH", Type: "char", Default: true}, + {Name: "cid_ops", Method: "HASH", Type: "cid", Default: true}, + {Name: "cidr_ops", Method: "HASH", Type: "inet", Default: false}, + {Name: "date_ops", Method: "HASH", Type: "date", Default: true}, + {Name: "enum_ops", Method: "HASH", Type: "anyenum", Default: true}, + {Name: "float4_ops", Method: "HASH", Type: "real", Default: true}, + {Name: "float8_ops", Method: "HASH", Type: "double precision", Default: true}, + {Name: "inet_ops", Method: "HASH", Type: "inet", Default: true}, + {Name: "int2_ops", Method: "HASH", Type: "smallint", Default: true}, + {Name: "int4_ops", Method: "HASH", Type: "integer", Default: true}, + {Name: "int8_ops", Method: "HASH", Type: "bigint", Default: true}, + {Name: "interval_ops", Method: "HASH", Type: "interval", Default: true}, + {Name: "jsonb_ops", Method: "HASH", Type: "jsonb", Default: true}, + {Name: "macaddr8_ops", Method: "HASH", Type: "macaddr8", Default: true}, + {Name: "macaddr_ops", Method: "HASH", Type: "macaddr", Default: true}, + {Name: "multirange_ops", Method: "HASH", Type: "anymultirange", Default: true}, + {Name: "name_ops", Method: "HASH", Type: "name", Default: true}, + {Name: "numeric_ops", Method: "HASH", Type: "numeric", Default: true}, + {Name: "oid_ops", Method: "HASH", Type: "oid", Default: true}, + {Name: "oidvector_ops", Method: "HASH", Type: "oidvector", Default: true}, + {Name: "pg_lsn_ops", Method: "HASH", Type: "pg_lsn", Default: true}, + {Name: "range_ops", Method: "HASH", Type: "anyrange", Default: true}, + {Name: "record_ops", Method: "HASH", Type: "record", Default: true}, + {Name: "text_ops", Method: "HASH", Type: "text", Default: true}, + {Name: "text_pattern_ops", Method: "HASH", Type: "text", Default: false}, + {Name: "tid_ops", Method: "HASH", Type: "tid", Default: true}, + {Name: "time_ops", Method: "HASH", Type: "time without time zone", Default: true}, + {Name: "timestamp_ops", Method: "HASH", Type: "timestamp without time zone", Default: true}, + {Name: "timestamptz_ops", Method: "HASH", Type: "timestamp with time zone", Default: true}, + {Name: "timetz_ops", Method: "HASH", Type: "time with time zone", Default: true}, + {Name: "uuid_ops", Method: "HASH", Type: "uuid", Default: true}, + {Name: "varchar_ops", Method: "HASH", Type: "text", Default: false}, + {Name: "varchar_pattern_ops", Method: "HASH", Type: "text", Default: false}, + {Name: "xid8_ops", Method: "HASH", Type: "xid8", Default: true}, + {Name: "xid_ops", Method: "HASH", Type: "xid", Default: true}, + {Name: "box_ops", Method: "SPGIST", Type: "box", Default: true}, + {Name: "inet_ops", Method: "SPGIST", Type: "inet", Default: true}, + {Name: "kd_point_ops", Method: "SPGIST", Type: "point", Default: false}, + {Name: "poly_ops", Method: "SPGIST", Type: "polygon", Default: true}, + {Name: "quad_point_ops", Method: "SPGIST", Type: "point", Default: true}, + {Name: "range_ops", Method: "SPGIST", Type: "anyrange", Default: true}, + {Name: "text_ops", Method: "SPGIST", Type: "text", Default: true}, +} diff --git a/vendor/ariga.io/atlas/sql/postgres/migrate.go b/vendor/ariga.io/atlas/sql/postgres/migrate.go new file 
mode 100644 index 00000000..38c7d5d3 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/migrate.go @@ -0,0 +1,1343 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgres + +import ( + "context" + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" +) + +// DefaultPlan provides basic planning capabilities for PostgreSQL dialects. +// Note, it is recommended to call Open, create a new Driver and use its +// migrate.PlanApplier when a database connection is available. +var DefaultPlan migrate.PlanApplier = &planApply{conn: conn{ExecQuerier: sqlx.NoRows}} + +// A planApply provides migration capabilities for schema elements. +type planApply struct{ conn } + +// PlanChanges returns a migration plan for the given schema changes. +func (p *planApply) PlanChanges(ctx context.Context, name string, changes []schema.Change, opts ...migrate.PlanOption) (*migrate.Plan, error) { + s := &state{ + conn: p.conn, + Plan: migrate.Plan{ + Name: name, + Transactional: true, + }, + createdE: make(map[string]*schema.EnumType), + alteredE: make(map[string]*schema.EnumType), + droppedE: make(map[string]*schema.EnumType), + } + for _, o := range opts { + o(&s.PlanOptions) + } + if err := s.plan(ctx, changes); err != nil { + return nil, err + } + if err := sqlx.SetReversible(&s.Plan); err != nil { + return nil, err + } + return &s.Plan, nil +} + +// ApplyChanges applies the changes on the database. An error is returned +// if the driver is unable to produce a plan to do so, or one of the statements +// is failed or unsupported. +func (p *planApply) ApplyChanges(ctx context.Context, changes []schema.Change, opts ...migrate.PlanOption) error { + return sqlx.ApplyChanges(ctx, changes, p, opts...) +} + +// state represents the state of a planning. It is not part of +// planApply so that multiple planning/applying can be called +// in parallel. +type state struct { + conn + migrate.Plan + migrate.PlanOptions + droppedT []*schema.Table + // Track the enums that were created, altered and + // dropped, in this phase to avoid duplicate updates. + createdE, alteredE, droppedE map[string]*schema.EnumType +} + +// Exec executes the changes on the database. An error is returned +// if one of the operations fail, or a change is not supported. +func (s *state) plan(ctx context.Context, changes []schema.Change) error { + if s.SchemaQualifier != nil { + if err := sqlx.CheckChangesScope(changes); err != nil { + return err + } + } + planned := s.topLevel(changes) + planned, err := sqlx.DetachCycles(planned) + if err != nil { + return err + } + for _, c := range planned { + switch c := c.(type) { + case *schema.AddTable: + err = s.addTable(ctx, c) + case *schema.DropTable: + err = s.dropTable(ctx, c) + case *schema.ModifyTable: + err = s.modifyTable(ctx, c) + case *schema.RenameTable: + s.renameTable(c) + default: + err = fmt.Errorf("unsupported change %T", c) + } + if err != nil { + return err + } + } + return nil +} + +// topLevel executes first the changes for creating or dropping schemas (top-level schema elements). 
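+// For example, given changes = []schema.Change{&schema.AddSchema{...}, &schema.AddTable{...}},
+// the AddSchema is planned here as a `CREATE SCHEMA "name"` statement (with a
+// `DROP SCHEMA "name" CASCADE` reverse), and only the AddTable is returned for
+// the table-level planning loop (an illustrative walk-through of the switch below).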
+func (s *state) topLevel(changes []schema.Change) []schema.Change { + planned := make([]schema.Change, 0, len(changes)) + for _, c := range changes { + switch c := c.(type) { + case *schema.AddSchema: + b := s.Build("CREATE SCHEMA") + // Add the 'IF NOT EXISTS' clause if it is explicitly specified, or if the schema name is 'public'. + // That is because the 'public' schema is automatically created by PostgreSQL in every new database, + // and running the command with this clause will fail in case the schema already exists. + if sqlx.Has(c.Extra, &schema.IfNotExists{}) || c.S.Name == "public" { + b.P("IF NOT EXISTS") + } + b.Ident(c.S.Name) + s.append(&migrate.Change{ + Cmd: b.String(), + Source: c, + Reverse: s.Build("DROP SCHEMA").Ident(c.S.Name).P("CASCADE").String(), + Comment: fmt.Sprintf("Add new schema named %q", c.S.Name), + }) + case *schema.DropSchema: + b := s.Build("DROP SCHEMA") + if sqlx.Has(c.Extra, &schema.IfExists{}) { + b.P("IF EXISTS") + } + b.Ident(c.S.Name).P("CASCADE") + s.append(&migrate.Change{ + Cmd: b.String(), + Source: c, + Comment: fmt.Sprintf("Drop schema named %q", c.S.Name), + }) + default: + planned = append(planned, c) + } + } + return planned +} + +// addTable builds and executes the query for creating a table in a schema. +func (s *state) addTable(ctx context.Context, add *schema.AddTable) error { + // Create enum types before using them in the 'CREATE TABLE' statement. + if err := s.mayAddEnums(ctx, add.T, add.T.Columns...); err != nil { + return err + } + var ( + errs []string + b = s.Build("CREATE TABLE") + ) + if sqlx.Has(add.Extra, &schema.IfNotExists{}) { + b.P("IF NOT EXISTS") + } + b.Table(add.T) + b.WrapIndent(func(b *sqlx.Builder) { + b.MapIndent(add.T.Columns, func(i int, b *sqlx.Builder) { + if err := s.column(b, add.T, add.T.Columns[i]); err != nil { + errs = append(errs, err.Error()) + } + }) + if pk := add.T.PrimaryKey; pk != nil { + b.Comma().NL().P("PRIMARY KEY") + if err := s.index(b, pk); err != nil { + errs = append(errs, err.Error()) + } + } + if len(add.T.ForeignKeys) > 0 { + b.Comma() + s.fks(b, add.T.ForeignKeys...) + } + for _, attr := range add.T.Attrs { + if c, ok := attr.(*schema.Check); ok { + b.Comma().NL() + check(b, c) + } + } + }) + if p := (Partition{}); sqlx.Has(add.T.Attrs, &p) { + s, err := formatPartition(p) + if err != nil { + errs = append(errs, err.Error()) + } + b.P(s) + } + if len(errs) > 0 { + return fmt.Errorf("create table %q: %s", add.T.Name, strings.Join(errs, ", ")) + } + s.append(&migrate.Change{ + Cmd: b.String(), + Source: add, + Comment: fmt.Sprintf("create %q table", add.T.Name), + Reverse: s.Build("DROP TABLE").Table(add.T).String(), + }) + for _, idx := range add.T.Indexes { + // Indexes do not need to be created concurrently on new tables. + if err := s.addIndexes(add.T, &schema.AddIndex{I: idx}); err != nil { + return err + } + } + s.addComments(add.T) + return nil +} + +// dropTable builds and executes the query for dropping a table from a schema. +func (s *state) dropTable(ctx context.Context, drop *schema.DropTable) error { + cmd := &changeGroup{} + s.droppedT = append(s.droppedT, drop.T) + for _, e := range s.enumTypes(drop.T) { + if err := s.mayDropEnum(cmd, drop.T.Schema, e); err != nil { + return err + } + } + rs := &state{ + conn: s.conn, + PlanOptions: s.PlanOptions, + alteredE: s.alteredE, + // Enums that were dropped above, were + // also created in the reverse commands. 
+ createdE: s.droppedE, + droppedE: s.createdE, + } + if err := rs.addTable(ctx, &schema.AddTable{T: drop.T}); err != nil { + return fmt.Errorf("calculate reverse for drop table %q: %w", drop.T.Name, err) + } + b := s.Build("DROP TABLE") + if sqlx.Has(drop.Extra, &schema.IfExists{}) { + b.P("IF EXISTS") + } + b.Table(drop.T) + if sqlx.Has(drop.Extra, &Cascade{}) { + b.P("CASCADE") + } + cmd.main = &migrate.Change{ + Cmd: b.String(), + Source: drop, + Comment: fmt.Sprintf("drop %q table", drop.T.Name), + // The reverse of 'DROP TABLE' might be a multi + // statement operation. e.g., table with indexes. + Reverse: func() any { + cmd := make([]string, len(rs.Changes)) + for i, c := range rs.Changes { + cmd[i] = c.Cmd + } + if len(cmd) == 1 { + return cmd[0] + } + return cmd + }(), + } + cmd.append(s) + return nil +} + +// modifyTable builds the statements that bring the table into its modified state. +func (s *state) modifyTable(ctx context.Context, modify *schema.ModifyTable) error { + var ( + alter []schema.Change + addI []*schema.AddIndex + dropI []*schema.DropIndex + changes []*migrate.Change + ) + for _, change := range skipAutoChanges(modify.Changes) { + switch change := change.(type) { + case *schema.AddAttr, *schema.ModifyAttr: + from, to, err := commentChange(change) + if err != nil { + return err + } + changes = append(changes, s.tableComment(modify.T, to, from)) + case *schema.DropAttr: + return fmt.Errorf("unsupported change type: %T", change) + case *schema.AddIndex: + if c := (schema.Comment{}); sqlx.Has(change.I.Attrs, &c) { + changes = append(changes, s.indexComment(modify.T, change.I, c.Text, "")) + } + addI = append(addI, change) + case *schema.DropIndex: + // Unlike DROP INDEX statements that are executed separately, + // DROP CONSTRAINT are added to the ALTER TABLE statement below. + if isUniqueConstraint(change.I) { + alter = append(alter, change) + } else { + dropI = append(dropI, change) + } + case *schema.ModifyPrimaryKey: + // Primary key modification needs to be split into "Drop" and "Add" + // because the new key may include columns that have not been added yet. + alter = append(alter, &schema.DropPrimaryKey{ + P: change.From, + }, &schema.AddPrimaryKey{ + P: change.To, + }) + case *schema.ModifyIndex: + k := change.Change + if change.Change.Is(schema.ChangeComment) { + from, to, err := commentChange(sqlx.CommentDiff(change.From.Attrs, change.To.Attrs)) + if err != nil { + return err + } + changes = append(changes, s.indexComment(modify.T, change.To, to, from)) + // If only the comment of the index was changed. + if k &= ^schema.ChangeComment; k.Is(schema.NoChange) { + continue + } + } + // Index modification requires rebuilding the index. + addI = append(addI, &schema.AddIndex{I: change.To}) + dropI = append(dropI, &schema.DropIndex{I: change.From}) + case *schema.RenameIndex: + changes = append(changes, &migrate.Change{ + Source: change, + Comment: fmt.Sprintf("rename an index from %q to %q", change.From.Name, change.To.Name), + Cmd: s.Build("ALTER INDEX").Ident(change.From.Name).P("RENAME TO").Ident(change.To.Name).String(), + Reverse: s.Build("ALTER INDEX").Ident(change.To.Name).P("RENAME TO").Ident(change.From.Name).String(), + }) + case *schema.ModifyForeignKey: + // Foreign-key modification is translated into 2 steps. + // Dropping the current foreign key and creating a new one. 
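+ // e.g., a changed ON DELETE action is planned as a DropForeignKey followed
+ // by an AddForeignKey, which alterTable then renders as a single
+ // `ALTER TABLE ... DROP CONSTRAINT ..., ADD CONSTRAINT ... FOREIGN KEY ...`
+ // statement (illustrative; see the corresponding cases in alterTable).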
+ alter = append(alter, &schema.DropForeignKey{ + F: change.From, + }, &schema.AddForeignKey{ + F: change.To, + }) + case *schema.AddColumn: + if err := s.mayAddEnums(ctx, modify.T, change.C); err != nil { + return err + } + if c := (schema.Comment{}); sqlx.Has(change.C.Attrs, &c) { + changes = append(changes, s.columnComment(modify.T, change.C, c.Text, "")) + } + alter = append(alter, change) + case *schema.ModifyColumn: + k := change.Change + if change.Change.Is(schema.ChangeComment) { + from, to, err := commentChange(sqlx.CommentDiff(change.From.Attrs, change.To.Attrs)) + if err != nil { + return err + } + changes = append(changes, s.columnComment(modify.T, change.To, to, from)) + // If only the comment of the column was changed. + if k &= ^schema.ChangeComment; k.Is(schema.NoChange) { + continue + } + } + from, ok1 := hasEnumType(change.From) + to, ok2 := hasEnumType(change.To) + switch { + // Enum was changed (underlying values). + case ok1 && ok2 && s.enumIdent(modify.T.Schema, from) == s.enumIdent(modify.T.Schema, to): + if err := s.alterEnum(modify.T, from, to); err != nil { + return err + } + // If only the enum values were changed, + // there is no need to ALTER the table. + if k == schema.ChangeType { + continue + } + // Enum was added or changed. + case !ok1 && ok2 || + ok1 && ok2 && s.enumIdent(modify.T.Schema, from) != s.enumIdent(modify.T.Schema, to): + if err := s.mayAddEnums(ctx, modify.T, change.To); err != nil { + return err + } + } + alter = append(alter, &schema.ModifyColumn{To: change.To, From: change.From, Change: k}) + case *schema.RenameColumn: + // "RENAME COLUMN" cannot be combined with other alterations. + b := s.Build("ALTER TABLE").Table(modify.T).P("RENAME COLUMN") + r := b.Clone() + changes = append(changes, &migrate.Change{ + Source: change, + Comment: fmt.Sprintf("rename a column from %q to %q", change.From.Name, change.To.Name), + Cmd: b.Ident(change.From.Name).P("TO").Ident(change.To.Name).String(), + Reverse: r.Ident(change.To.Name).P("TO").Ident(change.From.Name).String(), + }) + default: + alter = append(alter, change) + } + } + if err := s.dropIndexes(modify.T, dropI...); err != nil { + return err + } + if len(alter) > 0 { + if err := s.alterTable(modify.T, alter); err != nil { + return err + } + } + if err := s.addIndexes(modify.T, addI...); err != nil { + return err + } + s.append(changes...) + return nil +} + +// alterTable modifies the given table by executing on it a list of changes in one SQL statement. +func (s *state) alterTable(t *schema.Table, changes []schema.Change) error { + var ( + reverse []schema.Change + reversible = true + ) + // Constraints drop should be executed first. 
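+ // e.g., the DropPrimaryKey half of a ModifyPrimaryKey must precede its
+ // AddPrimaryKey half in the combined ALTER TABLE statement (illustrative
+ // motivation; dropConst below marks such drop changes).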
+ sort.SliceStable(changes, func(i, j int) bool { + return dropConst(changes[i]) && !dropConst(changes[j]) + }) + build := func(alter *changeGroup, changes []schema.Change) (string, error) { + b := s.Build("ALTER TABLE").Table(t) + err := b.MapCommaErr(changes, func(i int, b *sqlx.Builder) error { + switch change := changes[i].(type) { + case *schema.AddColumn: + b.P("ADD COLUMN") + if err := s.column(b, t, change.C); err != nil { + return err + } + reverse = append(reverse, &schema.DropColumn{C: change.C}) + case *schema.ModifyColumn: + if err := s.alterColumn(b, alter, t, change); err != nil { + return err + } + if change.Change.Is(schema.ChangeGenerated) { + reversible = false + } + reverse = append(reverse, &schema.ModifyColumn{ + From: change.To, + To: change.From, + Change: change.Change & ^schema.ChangeGenerated, + }) + toE, toHas := hasEnumType(change.To) + fromE, fromHas := hasEnumType(change.From) + // In case the enum was dropped or replaced with a different one. + if fromHas && !toHas || fromHas && toHas && s.enumIdent(t.Schema, fromE) != s.enumIdent(t.Schema, toE) { + if err := s.mayDropEnum(alter, t.Schema, fromE); err != nil { + return err + } + } + case *schema.DropColumn: + b.P("DROP COLUMN").Ident(change.C.Name) + reverse = append(reverse, &schema.AddColumn{C: change.C}) + if e, ok := hasEnumType(change.C); ok { + if err := s.mayDropEnum(alter, t.Schema, e); err != nil { + return err + } + } + case *schema.AddIndex: + // Skip reversing this operation as it is the inverse of + // the operation above and should not be used besides this. + b.P("ADD CONSTRAINT").Ident(change.I.Name).P("UNIQUE") + if err := s.indexParts(b, change.I); err != nil { + return err + } + case *schema.DropIndex: + b.P("DROP CONSTRAINT").Ident(change.I.Name) + reverse = append(reverse, &schema.AddIndex{I: change.I}) + case *schema.AddPrimaryKey: + b.P("ADD PRIMARY KEY") + if err := s.index(b, change.P); err != nil { + return err + } + reverse = append(reverse, &schema.DropPrimaryKey{P: change.P}) + case *schema.DropPrimaryKey: + b.P("DROP CONSTRAINT").Ident(pkName(t, change.P)) + reverse = append(reverse, &schema.AddPrimaryKey{P: change.P}) + case *schema.AddForeignKey: + b.P("ADD") + s.fks(b, change.F) + reverse = append(reverse, &schema.DropForeignKey{F: change.F}) + case *schema.DropForeignKey: + b.P("DROP CONSTRAINT").Ident(change.F.Symbol) + reverse = append(reverse, &schema.AddForeignKey{F: change.F}) + case *schema.AddCheck: + check(b.P("ADD"), change.C) + // Reverse operation is supported if + // the constraint name is not generated. 
+ if reversible = reversible && change.C.Name != ""; reversible { + reverse = append(reverse, &schema.DropCheck{C: change.C}) + } + case *schema.DropCheck: + b.P("DROP CONSTRAINT").Ident(change.C.Name) + reverse = append(reverse, &schema.AddCheck{C: change.C}) + case *schema.ModifyCheck: + switch { + case change.From.Name == "": + return errors.New("cannot modify unnamed check constraint") + case change.From.Name != change.To.Name: + return fmt.Errorf("mismatch check constraint names: %q != %q", change.From.Name, change.To.Name) + case change.From.Expr != change.To.Expr, + sqlx.Has(change.From.Attrs, &NoInherit{}) && !sqlx.Has(change.To.Attrs, &NoInherit{}), + !sqlx.Has(change.From.Attrs, &NoInherit{}) && sqlx.Has(change.To.Attrs, &NoInherit{}): + b.P("DROP CONSTRAINT").Ident(change.From.Name).Comma().P("ADD") + check(b, change.To) + default: + return errors.New("unknown check constraint change") + } + reverse = append(reverse, &schema.ModifyCheck{ + From: change.To, + To: change.From, + }) + } + return nil + }) + if err != nil { + return "", err + } + return b.String(), nil + } + cmd := &changeGroup{} + stmt, err := build(cmd, changes) + if err != nil { + return fmt.Errorf("alter table %q: %v", t.Name, err) + } + cmd.main = &migrate.Change{ + Cmd: stmt, + Source: &schema.ModifyTable{ + T: t, + Changes: changes, + }, + Comment: fmt.Sprintf("modify %q table", t.Name), + } + if reversible { + // Changes should be reverted in + // a reversed order they were created. + sqlx.ReverseChanges(reverse) + if cmd.main.Reverse, err = build(&changeGroup{}, reverse); err != nil { + return fmt.Errorf("reverse alter table %q: %v", t.Name, err) + } + } + cmd.append(s) + return nil +} + +// changeGroup describes an alter table migrate.Change where its main command +// can be supported by additional statements before and after it is executed. +type changeGroup struct { + main *migrate.Change + before, after []*migrate.Change +} + +func (a *changeGroup) append(s *state) { + s.append(a.before...) + s.append(a.main) + s.append(a.after...) +} + +func (s *state) alterColumn(b *sqlx.Builder, alter *changeGroup, t *schema.Table, c *schema.ModifyColumn) error { + for k := c.Change; !k.Is(schema.NoChange); { + b.P("ALTER COLUMN").Ident(c.To.Name) + switch { + case k.Is(schema.ChangeType): + if err := s.alterType(b, alter, t, c); err != nil { + return err + } + k &= ^schema.ChangeType + case k.Is(schema.ChangeNull) && c.To.Type.Null: + if t, ok := c.To.Type.Type.(*SerialType); ok { + return fmt.Errorf("NOT NULL constraint is required for %s column %q", t.T, c.To.Name) + } + b.P("DROP NOT NULL") + k &= ^schema.ChangeNull + case k.Is(schema.ChangeNull) && !c.To.Type.Null: + b.P("SET NOT NULL") + k &= ^schema.ChangeNull + case k.Is(schema.ChangeDefault) && c.To.Default == nil: + b.P("DROP DEFAULT") + k &= ^schema.ChangeDefault + case k.Is(schema.ChangeDefault) && c.To.Default != nil: + s.columnDefault(b.P("SET"), c.To) + k &= ^schema.ChangeDefault + case k.Is(schema.ChangeAttr): + toI, ok := identity(c.To.Attrs) + if !ok { + return fmt.Errorf("unexpected attribute change (expect IDENTITY): %v", c.To.Attrs) + } + // The syntax for altering identity columns is identical to sequence_options. 
+ // https://www.postgresql.org/docs/current/sql-altersequence.html + b.P("SET GENERATED", toI.Generation, "SET START WITH", strconv.FormatInt(toI.Sequence.Start, 10), "SET INCREMENT BY", strconv.FormatInt(toI.Sequence.Increment, 10)) + // Skip SEQUENCE RESTART in case the "start value" is less than the "current value" in one + // of the states (inspected and desired), because this function is used for both UP and DOWN. + if fromI, ok := identity(c.From.Attrs); (!ok || fromI.Sequence.Last < toI.Sequence.Start) && toI.Sequence.Last < toI.Sequence.Start { + b.P("RESTART") + } + k &= ^schema.ChangeAttr + case k.Is(schema.ChangeGenerated): + if sqlx.Has(c.To.Attrs, &schema.GeneratedExpr{}) { + return fmt.Errorf("unexpected generation expression change (expect DROP EXPRESSION): %v", c.To.Attrs) + } + b.P("DROP EXPRESSION") + k &= ^schema.ChangeGenerated + default: // e.g. schema.ChangeComment. + return fmt.Errorf("unexpected column change: %d", k) + } + if !k.Is(schema.NoChange) { + b.Comma() + } + } + return nil +} + +// alterType appends the clause(s) to alter the column type and assuming the +// "ALTER COLUMN " was called before by the alterColumn function. +func (s *state) alterType(b *sqlx.Builder, alter *changeGroup, t *schema.Table, c *schema.ModifyColumn) error { + // Commands for creating and dropping serial sequences. + createDropSeq := func(st *SerialType) (string, string, string) { + seq := fmt.Sprintf(`%s%q`, s.schemaPrefix(t.Schema), st.sequence(t, c.To)) + drop := s.Build("DROP SEQUENCE IF EXISTS").P(seq).String() + create := s.Build("CREATE SEQUENCE IF NOT EXISTS").P(seq, "OWNED BY"). + P(fmt.Sprintf(`%s%q.%q`, s.schemaPrefix(t.Schema), t.Name, c.To.Name)). + String() + return create, drop, seq + } + toS, toHas := c.To.Type.Type.(*SerialType) + fromS, fromHas := c.From.Type.Type.(*SerialType) + switch { + // Sequence was dropped. + case fromHas && !toHas: + b.P("DROP DEFAULT") + create, drop, _ := createDropSeq(fromS) + // Sequence should be deleted after it was dropped + // from the DEFAULT value. + alter.after = append(alter.after, &migrate.Change{ + Source: c, + Comment: fmt.Sprintf("drop sequence used by serial column %q", c.From.Name), + Cmd: drop, + Reverse: create, + }) + toT, err := FormatType(c.To.Type.Type) + if err != nil { + return err + } + fromT, err := FormatType(fromS.IntegerType()) + if err != nil { + return err + } + // Underlying type was changed. e.g. serial to bigint. + if toT != fromT { + b.Comma().P("ALTER COLUMN").Ident(c.To.Name).P("TYPE", toT) + } + // Sequence was added. + case !fromHas && toHas: + create, drop, seq := createDropSeq(toS) + // Sequence should be created before it is used by the + // column DEFAULT value. + alter.before = append(alter.before, &migrate.Change{ + Source: c, + Comment: fmt.Sprintf("create sequence for serial column %q", c.To.Name), + Cmd: create, + Reverse: drop, + }) + b.P("SET DEFAULT", fmt.Sprintf("nextval('%s')", seq)) + toT, err := FormatType(toS.IntegerType()) + if err != nil { + return err + } + fromT, err := FormatType(c.From.Type.Type) + if err != nil { + return err + } + // Underlying type was changed. e.g. integer to bigserial (bigint). + if toT != fromT { + b.Comma().P("ALTER COLUMN").Ident(c.To.Name).P("TYPE", toT) + } + // Serial type was changed. e.g. serial to bigserial. 
+ case fromHas && toHas: + f, err := FormatType(toS.IntegerType()) + if err != nil { + return err + } + b.P("TYPE", f) + default: + var ( + f string + err error + ) + if e, ok := c.To.Type.Type.(*schema.EnumType); ok { + f = s.enumIdent(t.Schema, e) + } else if f, err = FormatType(c.To.Type.Type); err != nil { + return err + } + b.P("TYPE", f) + } + if collate := (schema.Collation{}); sqlx.Has(c.To.Attrs, &collate) { + b.P("COLLATE", collate.V) + } + return nil +} + +func (s *state) renameTable(c *schema.RenameTable) { + s.append(&migrate.Change{ + Source: c, + Comment: fmt.Sprintf("rename a table from %q to %q", c.From.Name, c.To.Name), + Cmd: s.Build("ALTER TABLE").Table(c.From).P("RENAME TO").Table(c.To).String(), + Reverse: s.Build("ALTER TABLE").Table(c.To).P("RENAME TO").Table(c.From).String(), + }) +} + +func (s *state) addComments(t *schema.Table) { + var c schema.Comment + if sqlx.Has(t.Attrs, &c) && c.Text != "" { + s.append(s.tableComment(t, c.Text, "")) + } + for i := range t.Columns { + if sqlx.Has(t.Columns[i].Attrs, &c) && c.Text != "" { + s.append(s.columnComment(t, t.Columns[i], c.Text, "")) + } + } + for i := range t.Indexes { + if sqlx.Has(t.Indexes[i].Attrs, &c) && c.Text != "" { + s.append(s.indexComment(t, t.Indexes[i], c.Text, "")) + } + } +} + +func (s *state) tableComment(t *schema.Table, to, from string) *migrate.Change { + b := s.Build("COMMENT ON TABLE").Table(t).P("IS") + return &migrate.Change{ + Cmd: b.Clone().P(quote(to)).String(), + Comment: fmt.Sprintf("set comment to table: %q", t.Name), + Reverse: b.Clone().P(quote(from)).String(), + } +} + +func (s *state) columnComment(t *schema.Table, c *schema.Column, to, from string) *migrate.Change { + b := s.Build("COMMENT ON COLUMN").Table(t) + b.WriteByte('.') + b.Ident(c.Name).P("IS") + return &migrate.Change{ + Cmd: b.Clone().P(quote(to)).String(), + Comment: fmt.Sprintf("set comment to column: %q on table: %q", c.Name, t.Name), + Reverse: b.Clone().P(quote(from)).String(), + } +} + +func (s *state) indexComment(t *schema.Table, idx *schema.Index, to, from string) *migrate.Change { + b := s.Build("COMMENT ON INDEX").Ident(idx.Name).P("IS") + return &migrate.Change{ + Cmd: b.Clone().P(quote(to)).String(), + Comment: fmt.Sprintf("set comment to index: %q on table: %q", idx.Name, t.Name), + Reverse: b.Clone().P(quote(from)).String(), + } +} + +func (s *state) dropIndexes(t *schema.Table, drops ...*schema.DropIndex) error { + adds := make([]*schema.AddIndex, len(drops)) + for i, d := range drops { + adds[i] = &schema.AddIndex{I: d.I, Extra: d.Extra} + } + rs := &state{conn: s.conn, PlanOptions: s.PlanOptions} + if err := rs.addIndexes(t, adds...); err != nil { + return err + } + for i, add := range adds { + s.append(&migrate.Change{ + Cmd: rs.Changes[i].Reverse.(string), + Comment: fmt.Sprintf("drop index %q from table: %q", add.I.Name, t.Name), + Reverse: rs.Changes[i].Cmd, + }) + } + return nil +} + +func (s *state) mayAddEnums(ctx context.Context, t *schema.Table, columns ...*schema.Column) error { + for _, c := range columns { + e, ok := hasEnumType(c) + if !ok { + continue + } + if e.T == "" { + return fmt.Errorf("missing enum name for column %q", c.Name) + } + if exists, err := s.enumExists(ctx, t.Schema, e); err != nil { + return err + } else if exists { + // Enum exists and was not created + // on this migration phase. 
+ continue + } + name := s.enumIdent(t.Schema, e) + if prev, ok := s.createdE[name]; ok { + if !sqlx.ValuesEqual(prev.Values, e.Values) { + return fmt.Errorf("enum type %s has inconsistent desired state: %q != %q", name, prev.Values, e.Values) + } + continue + } + s.createdE[name] = e + create, drop := s.createDropEnum(t.Schema, e) + s.append(&migrate.Change{ + Cmd: create, + Reverse: drop, + Comment: fmt.Sprintf("create enum type %q", e.T), + }) + } + return nil +} + +func (s *state) alterEnum(t *schema.Table, from, to *schema.EnumType) error { + if len(from.Values) > len(to.Values) { + return fmt.Errorf("dropping enum (%q) value is not supported", from.T) + } + for i := range from.Values { + if from.Values[i] != to.Values[i] { + return fmt.Errorf("replacing or reordering enum (%q) value is not supported: %q != %q", to.T, to.Values, from.Values) + } + } + name := s.enumIdent(t.Schema, from) + if prev, ok := s.alteredE[name]; ok { + if !sqlx.ValuesEqual(prev.Values, to.Values) { + return fmt.Errorf("enum type %s has inconsistent desired state: %q != %q", name, prev.Values, to.Values) + } + return nil + } + s.alteredE[name] = to + for _, v := range to.Values[len(from.Values):] { + s.append(&migrate.Change{ + Cmd: s.Build("ALTER TYPE").P(name, "ADD VALUE", quote(v)).String(), + Comment: fmt.Sprintf("add value to enum type: %q", from.T), + }) + } + return nil +} + +func (s *state) enumExists(ctx context.Context, ns *schema.Schema, e *schema.EnumType) (bool, error) { + query, args := `SELECT * FROM pg_type t JOIN pg_namespace n on t.typnamespace = n.oid WHERE t.typname = $1 AND t.typtype = 'e'`, []any{e.T} + if es := s.enumSchema(ns, e); es != "" { + query += " AND n.nspname = $2" + args = append(args, es) + } + rows, err := s.QueryContext(ctx, query, args...) + if err != nil { + return false, fmt.Errorf("check enum existence: %w", err) + } + defer rows.Close() + return rows.Next(), rows.Err() +} + +// mayDropEnum drops dangling enum types from the schema. An optional +// "dropped" list can be provided to skip while searching for usage. +func (s *state) mayDropEnum(cmd *changeGroup, ns *schema.Schema, e *schema.EnumType) error { + name := s.enumIdent(ns, e) + if _, ok := s.droppedE[name]; ok { + return nil + } + schemas := []*schema.Schema{ns} + // In case there is a realm attached, traverse the entire tree. + if ns.Realm != nil && len(ns.Realm.Schemas) > 0 { + schemas = ns.Realm.Schemas + } + for i := range schemas { + for _, t := range schemas[i].Tables { + // Skip dropped tables. + if containsT(s.droppedT, t) { + continue + } + for _, c := range t.Columns { + e1, ok := hasEnumType(c) + // Although we search in siblings schemas, use the + // table's one for building the enum identifier. 
+ if ok && s.enumIdent(ns, e1) == name { + return nil + } + } + } + } + s.droppedE[name] = e + create, drop := s.createDropEnum(ns, e) + cmd.after = append(cmd.after, &migrate.Change{ + Cmd: drop, + Reverse: create, + Comment: fmt.Sprintf("drop enum type %q", e.T), + }) + return nil +} + +func (s *state) addIndexes(t *schema.Table, adds ...*schema.AddIndex) error { + for _, add := range adds { + b, idx := s.Build("CREATE"), add.I + if idx.Unique { + b.P("UNIQUE") + } + b.P("INDEX") + if sqlx.Has(add.Extra, &Concurrently{}) { + b.P("CONCURRENTLY") + } + if idx.Name != "" { + b.Ident(idx.Name) + } + b.P("ON").Table(t) + if err := s.index(b, idx); err != nil { + return err + } + s.append(&migrate.Change{ + Cmd: b.String(), + Comment: fmt.Sprintf("create index %q to table: %q", idx.Name, t.Name), + Reverse: func() string { + b := s.Build("DROP INDEX") + if sqlx.Has(add.Extra, &Concurrently{}) { + b.P("CONCURRENTLY") + } + // Unlike MySQL, the DROP command is not attached to ALTER TABLE. + // Therefore, we print indexes with their qualified name, because + // the connection that executes the statements may not be attached + // to this schema. + if t.Schema != nil { + b.WriteString(s.schemaPrefix(t.Schema)) + } + b.Ident(idx.Name) + return b.String() + }(), + }) + } + return nil +} + +func (s *state) column(b *sqlx.Builder, t *schema.Table, c *schema.Column) error { + f, err := s.formatType(t, c) + if err != nil { + return err + } + b.Ident(c.Name).P(f) + if !c.Type.Null { + b.P("NOT") + } else if t, ok := c.Type.Type.(*SerialType); ok { + return fmt.Errorf("NOT NULL constraint is required for %s column %q", t.T, c.Name) + } + b.P("NULL") + s.columnDefault(b, c) + for _, attr := range c.Attrs { + switch a := attr.(type) { + case *schema.Comment: + case *schema.Collation: + b.P("COLLATE").Ident(a.V) + case *Identity, *schema.GeneratedExpr: + // Handled below. + default: + return fmt.Errorf("unexpected column attribute: %T", attr) + } + } + switch hasI, hasX := sqlx.Has(c.Attrs, &Identity{}), sqlx.Has(c.Attrs, &schema.GeneratedExpr{}); { + case hasI && hasX: + return fmt.Errorf("both identity and generation expression specified for column %q", c.Name) + case hasI: + id, _ := identity(c.Attrs) + b.P("GENERATED", id.Generation, "AS IDENTITY") + if id.Sequence.Start != defaultSeqStart || id.Sequence.Increment != defaultSeqIncrement { + b.Wrap(func(b *sqlx.Builder) { + if id.Sequence.Start != defaultSeqStart { + b.P("START WITH", strconv.FormatInt(id.Sequence.Start, 10)) + } + if id.Sequence.Increment != defaultSeqIncrement { + b.P("INCREMENT BY", strconv.FormatInt(id.Sequence.Increment, 10)) + } + }) + } + case hasX: + x := &schema.GeneratedExpr{} + sqlx.Has(c.Attrs, x) + b.P("GENERATED ALWAYS AS", sqlx.MayWrap(x.Expr), "STORED") + } + return nil +} + +// columnDefault writes the default value of column to the builder. +func (s *state) columnDefault(b *sqlx.Builder, c *schema.Column) { + switch x := c.Default.(type) { + case *schema.Literal: + v := x.V + switch c.Type.Type.(type) { + case *schema.BoolType, *schema.DecimalType, *schema.IntegerType, *schema.FloatType: + default: + v = quote(v) + } + b.P("DEFAULT", v) + case *schema.RawExpr: + // Ignore identity functions added by the Differ. 
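+ // e.g., an inspected serial column carries a default such as
+ // nextval('"public"."t_id_seq"'::regclass); it is skipped here since the
+ // SerialType already implies it (illustrative value).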
+ if _, ok := c.Type.Type.(*SerialType); !ok { + b.P("DEFAULT", x.X) + } + } +} + +func (s *state) indexParts(b *sqlx.Builder, idx *schema.Index) (err error) { + b.Wrap(func(b *sqlx.Builder) { + err = b.MapCommaErr(idx.Parts, func(i int, b *sqlx.Builder) error { + switch part := idx.Parts[i]; { + case part.C != nil: + b.Ident(part.C.Name) + case part.X != nil: + b.WriteString(sqlx.MayWrap(part.X.(*schema.RawExpr).X)) + } + return s.partAttrs(b, idx, idx.Parts[i]) + }) + }) + return +} + +func (s *state) partAttrs(b *sqlx.Builder, idx *schema.Index, p *schema.IndexPart) error { + if c := (schema.Collation{}); sqlx.Has(p.Attrs, &c) { + b.P("COLLATE").Ident(c.V) + } + if op := (IndexOpClass{}); sqlx.Has(p.Attrs, &op) { + d, err := op.DefaultFor(idx, p) + if err != nil { + return err + } + if !d { + b.P(op.String()) + } + } + if p.Desc { + b.P("DESC") + } + for _, attr := range p.Attrs { + switch attr := attr.(type) { + case *IndexColumnProperty: + switch { + // Defaults when DESC is specified. + case p.Desc && attr.NullsFirst: + case p.Desc && attr.NullsLast: + b.P("NULLS LAST") + // Defaults when DESC is not specified. + case !p.Desc && attr.NullsLast: + case !p.Desc && attr.NullsFirst: + b.P("NULLS FIRST") + } + // Handled above. + case *IndexOpClass, *schema.Collation: + default: + return fmt.Errorf("postgres: unexpected index part attribute: %T", attr) + } + } + return nil +} + +func (s *state) index(b *sqlx.Builder, idx *schema.Index) error { + // Avoid appending the default method. + if t := (IndexType{}); sqlx.Has(idx.Attrs, &t) && strings.ToUpper(t.T) != IndexTypeBTree { + b.P("USING", t.T) + } + if err := s.indexParts(b, idx); err != nil { + return err + } + if c := (IndexInclude{}); sqlx.Has(idx.Attrs, &c) { + b.P("INCLUDE") + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(c.Columns, func(i int, b *sqlx.Builder) { + b.Ident(c.Columns[i].Name) + }) + }) + } + if p, ok := indexStorageParams(idx.Attrs); ok { + b.P("WITH") + b.Wrap(func(b *sqlx.Builder) { + var parts []string + if p.AutoSummarize { + parts = append(parts, "autosummarize = true") + } + if p.PagesPerRange != 0 && p.PagesPerRange != defaultPagePerRange { + parts = append(parts, fmt.Sprintf("pages_per_range = %d", p.PagesPerRange)) + } + b.WriteString(strings.Join(parts, ", ")) + }) + } + if p := (IndexPredicate{}); sqlx.Has(idx.Attrs, &p) { + b.P("WHERE").P(p.P) + } + for _, attr := range idx.Attrs { + switch attr.(type) { + case *schema.Comment, *IndexType, *IndexInclude, *Constraint, *IndexPredicate, *IndexStorageParams: + default: + return fmt.Errorf("postgres: unexpected index attribute: %T", attr) + } + } + return nil +} + +func (s *state) fks(b *sqlx.Builder, fks ...*schema.ForeignKey) { + b.MapIndent(fks, func(i int, b *sqlx.Builder) { + fk := fks[i] + if fk.Symbol != "" { + b.P("CONSTRAINT").Ident(fk.Symbol) + } + b.P("FOREIGN KEY") + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(fk.Columns, func(i int, b *sqlx.Builder) { + b.Ident(fk.Columns[i].Name) + }) + }) + b.P("REFERENCES").Table(fk.RefTable) + b.Wrap(func(b *sqlx.Builder) { + b.MapComma(fk.RefColumns, func(i int, b *sqlx.Builder) { + b.Ident(fk.RefColumns[i].Name) + }) + }) + if fk.OnUpdate != "" { + b.P("ON UPDATE", string(fk.OnUpdate)) + } + if fk.OnDelete != "" { + b.P("ON DELETE", string(fk.OnDelete)) + } + }) +} + +func (s *state) append(c ...*migrate.Change) { + s.Changes = append(s.Changes, c...) +} + +// Build instantiates a new builder and writes the given phrase to it. 
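+// e.g., s.Build("ALTER TABLE").Table(t).P("RENAME TO").Table(to).String() yields
+// `ALTER TABLE "t" RENAME TO "to"` (schema-qualified when a qualifier is set),
+// as used by renameTable above (illustrative output).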
+func (s *state) Build(phrases ...string) *sqlx.Builder { + b := &sqlx.Builder{QuoteChar: '"', Schema: s.SchemaQualifier, Indent: s.Indent} + return b.P(phrases...) +} + +// skipAutoChanges filters unnecessary changes that are automatically +// happened by the database when ALTER TABLE is executed. +func skipAutoChanges(changes []schema.Change) []schema.Change { + var ( + dropC = make(map[string]bool) + planned = make([]schema.Change, 0, len(changes)) + ) + for _, c := range changes { + if c, ok := c.(*schema.DropColumn); ok { + dropC[c.C.Name] = true + } + } +search: + for _, c := range changes { + switch c := c.(type) { + // Indexes involving the column are automatically dropped + // with it. This is true for multi-columns indexes as well. + // See https://www.postgresql.org/docs/current/sql-altertable.html + case *schema.DropIndex: + for _, p := range c.I.Parts { + if p.C != nil && dropC[p.C.Name] { + continue search + } + } + // Simple case for skipping constraint dropping, + // if the child table columns were dropped. + case *schema.DropForeignKey: + for _, c := range c.F.Columns { + if dropC[c.Name] { + continue search + } + } + } + planned = append(planned, c) + } + return planned +} + +// commentChange extracts the information for modifying a comment from the given change. +func commentChange(c schema.Change) (from, to string, err error) { + switch c := c.(type) { + case *schema.AddAttr: + toC, ok := c.A.(*schema.Comment) + if ok { + to = toC.Text + return + } + err = fmt.Errorf("unexpected AddAttr.(%T) for comment change", c.A) + case *schema.ModifyAttr: + fromC, ok1 := c.From.(*schema.Comment) + toC, ok2 := c.To.(*schema.Comment) + if ok1 && ok2 { + from, to = fromC.Text, toC.Text + return + } + err = fmt.Errorf("unsupported ModifyAttr(%T, %T) change", c.From, c.To) + default: + err = fmt.Errorf("unexpected change %T", c) + } + return +} + +// checks writes the CHECK constraint to the builder. +func check(b *sqlx.Builder, c *schema.Check) { + if c.Name != "" { + b.P("CONSTRAINT").Ident(c.Name) + } + b.P("CHECK", sqlx.MayWrap(c.Expr)) + if sqlx.Has(c.Attrs, &NoInherit{}) { + b.P("NO INHERIT") + } +} + +// isUniqueConstraint reports if the index is a valid UNIQUE constraint. +func isUniqueConstraint(i *schema.Index) bool { + hasC := func() bool { + for _, a := range i.Attrs { + if c, ok := a.(*Constraint); ok && c.IsUnique() { + return true + } + } + return false + }() + if !hasC || !i.Unique { + return false + } + // UNIQUE constraint cannot use functional indexes, + // and all its parts must have the default sort ordering. + for _, p := range i.Parts { + if p.X != nil || p.Desc { + return false + } + } + for _, a := range i.Attrs { + switch a := a.(type) { + // UNIQUE constraints must have BTREE type indexes. + case *IndexType: + if strings.ToUpper(a.T) != IndexTypeBTree { + return false + } + // Partial indexes are not allowed. + case *IndexPredicate: + return false + } + } + return true +} + +func quote(s string) string { + if sqlx.IsQuoted(s, '\'') { + return s + } + return "'" + strings.ReplaceAll(s, "'", "''") + "'" +} + +func (s *state) createDropEnum(sc *schema.Schema, e *schema.EnumType) (string, string) { + name := s.enumIdent(sc, e) + return s.Build("CREATE TYPE"). + P(name, "AS ENUM"). + Wrap(func(b *sqlx.Builder) { + b.MapComma(e.Values, func(i int, b *sqlx.Builder) { + b.WriteString(quote(e.Values[i])) + }) + }). 
+ String(), + s.Build("DROP TYPE").P(name).String() +} + +func (s *state) enumIdent(ns *schema.Schema, e *schema.EnumType) string { + es := s.enumSchema(ns, e) + if es != "" { + return fmt.Sprintf("%q.%q", es, e.T) + } + return strconv.Quote(e.T) +} + +func (s *state) enumSchema(ns *schema.Schema, e *schema.EnumType) (es string) { + switch { + // In case the plan uses a specific schema qualifier. + case s.SchemaQualifier != nil: + es = *s.SchemaQualifier + // Enum schema has higher precedence. + case e.Schema != nil: + es = e.Schema.Name + // Fallback to table schema if exists. + case ns != nil: + es = ns.Name + } + return +} + +// enumType returns all the enum types used by the given table. +func (s *state) enumTypes(t *schema.Table) []*schema.EnumType { + var ( + es []*schema.EnumType + seen = make(map[string]bool) + ) + for _, c := range t.Columns { + if e, ok := hasEnumType(c); ok && !seen[s.enumIdent(t.Schema, e)] { + seen[s.enumIdent(t.Schema, e)] = true + es = append(es, e) + } + } + return es +} + +// schemaPrefix returns the schema prefix based on the planner config. +func (s *state) schemaPrefix(ns *schema.Schema) string { + switch { + case s.SchemaQualifier != nil: + // In case the qualifier is empty, ignore. + if *s.SchemaQualifier != "" { + return fmt.Sprintf("%q.", *s.SchemaQualifier) + } + case ns != nil && ns.Name != "": + return fmt.Sprintf("%q.", ns.Name) + } + return "" +} + +// formatType formats the type but takes into account the qualifier. +func (s *state) formatType(t *schema.Table, c *schema.Column) (string, error) { + switch tt := c.Type.Type.(type) { + case *schema.EnumType: + return s.enumIdent(t.Schema, tt), nil + case *ArrayType: + if e, ok := tt.Type.(*schema.EnumType); ok { + return s.enumIdent(t.Schema, e) + "[]", nil + } + } + return FormatType(c.Type.Type) +} + +func hasEnumType(c *schema.Column) (*schema.EnumType, bool) { + switch t := c.Type.Type.(type) { + case *schema.EnumType: + return t, true + case *ArrayType: + if e, ok := t.Type.(*schema.EnumType); ok { + return e, true + } + } + return nil, false +} + +func containsT(ts []*schema.Table, t *schema.Table) bool { + for _, t1 := range ts { + if t1.Schema.Name == t.Schema.Name && t1.Name == t.Name { + return true + } + } + return false +} + +func pkName(t *schema.Table, pk *schema.Index) string { + if pk.Name != "" { + return pk.Name + } + // The default naming for primary-key constraints is
<Table>
_pkey. + // See: the ChooseIndexName function in PostgreSQL for more reference. + return t.Name + "_pkey" +} + +// dropConst indicates if the given change is a constraint drop. +func dropConst(c schema.Change) bool { + switch c.(type) { + case *schema.DropIndex, *schema.DropPrimaryKey, *schema.DropCheck, *schema.DropForeignKey: + return true + default: + return false + } +} diff --git a/vendor/ariga.io/atlas/sql/postgres/sqlspec.go b/vendor/ariga.io/atlas/sql/postgres/sqlspec.go new file mode 100644 index 00000000..533d7063 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/postgres/sqlspec.go @@ -0,0 +1,838 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package postgres + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "ariga.io/atlas/schemahcl" + "ariga.io/atlas/sql/internal/specutil" + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/postgres/internal/postgresop" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlspec" + + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/zclconf/go-cty/cty" +) + +type ( + doc struct { + Tables []*sqlspec.Table `spec:"table"` + Enums []*Enum `spec:"enum"` + Schemas []*sqlspec.Schema `spec:"schema"` + } + // Enum holds a specification for an enum, that can be referenced as a column type. + Enum struct { + Name string `spec:",name"` + Schema *schemahcl.Ref `spec:"schema"` + Values []string `spec:"values"` + schemahcl.DefaultExtension + } +) + +func init() { + schemahcl.Register("enum", &Enum{}) +} + +// evalSpec evaluates an Atlas DDL document into v using the input. +func evalSpec(p *hclparse.Parser, v any, input map[string]cty.Value) error { + switch v := v.(type) { + case *schema.Realm: + var d doc + if err := hclState.Eval(p, &d, input); err != nil { + return err + } + if err := specutil.Scan(v, d.Schemas, d.Tables, convertTable); err != nil { + return fmt.Errorf("specutil: failed converting to *schema.Realm: %w", err) + } + if len(d.Enums) > 0 { + if err := convertEnums(d.Tables, d.Enums, v); err != nil { + return err + } + } + case *schema.Schema: + var d doc + if err := hclState.Eval(p, &d, input); err != nil { + return err + } + if len(d.Schemas) != 1 { + return fmt.Errorf("specutil: expecting document to contain a single schema, got %d", len(d.Schemas)) + } + r := &schema.Realm{} + if err := specutil.Scan(r, d.Schemas, d.Tables, convertTable); err != nil { + return err + } + if err := convertEnums(d.Tables, d.Enums, r); err != nil { + return err + } + *v = *r.Schemas[0] + case schema.Schema, schema.Realm: + return fmt.Errorf("postgres: Eval expects a pointer: received %[1]T, expected *%[1]T", v) + default: + return hclState.Eval(p, v, input) + } + return nil +} + +// MarshalSpec marshals v into an Atlas DDL document using a schemahcl.Marshaler. +func MarshalSpec(v any, marshaler schemahcl.Marshaler) ([]byte, error) { + var d doc + switch s := v.(type) { + case *schema.Schema: + var err error + doc, err := schemaSpec(s) + if err != nil { + return nil, fmt.Errorf("specutil: failed converting schema to spec: %w", err) + } + d.Tables = doc.Tables + d.Schemas = doc.Schemas + d.Enums = doc.Enums + case *schema.Realm: + for _, s := range s.Schemas { + doc, err := schemaSpec(s) + if err != nil { + return nil, fmt.Errorf("specutil: failed converting schema to spec: %w", err) + } + d.Tables = append(d.Tables, doc.Tables...) + d.Schemas = append(d.Schemas, doc.Schemas...) 
+ d.Enums = append(d.Enums, doc.Enums...) + } + if err := specutil.QualifyDuplicates(d.Tables); err != nil { + return nil, err + } + if err := specutil.QualifyReferences(d.Tables, s); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("specutil: failed marshaling spec. %T is not supported", v) + } + return marshaler.MarshalSpec(&d) +} + +var ( + hclState = schemahcl.New( + schemahcl.WithTypes("table.column.type", TypeRegistry.Specs()), + schemahcl.WithScopedEnums("table.index.type", IndexTypeBTree, IndexTypeBRIN, IndexTypeHash, IndexTypeGIN, IndexTypeGiST, "GiST", IndexTypeSPGiST, "SPGiST"), + schemahcl.WithScopedEnums("table.partition.type", PartitionTypeRange, PartitionTypeList, PartitionTypeHash), + schemahcl.WithScopedEnums("table.column.identity.generated", GeneratedTypeAlways, GeneratedTypeByDefault), + schemahcl.WithScopedEnums("table.column.as.type", "STORED"), + schemahcl.WithScopedEnums("table.foreign_key.on_update", specutil.ReferenceVars...), + schemahcl.WithScopedEnums("table.foreign_key.on_delete", specutil.ReferenceVars...), + schemahcl.WithScopedEnums("table.index.on.ops", func() (ops []string) { + for _, op := range postgresop.Classes { + ops = append(ops, op.Name) + } + return ops + }()...), + ) + // MarshalHCL marshals v into an Atlas HCL DDL document. + MarshalHCL = schemahcl.MarshalerFunc(func(v any) ([]byte, error) { + return MarshalSpec(v, hclState) + }) + // EvalHCL implements the schemahcl.Evaluator interface. + EvalHCL = schemahcl.EvalFunc(evalSpec) + + // EvalHCLBytes is a helper that evaluates an HCL document from a byte slice instead + // of from an hclparse.Parser instance. + EvalHCLBytes = specutil.HCLBytesFunc(EvalHCL) +) + +// convertTable converts a sqlspec.Table to a schema.Table. Table conversion is done without converting +// ForeignKeySpecs into ForeignKeys, as the target tables do not necessarily exist in the schema +// at this point. Instead, the linking is done by the convertSchema function. +func convertTable(spec *sqlspec.Table, parent *schema.Schema) (*schema.Table, error) { + t, err := specutil.Table(spec, parent, convertColumn, convertPK, convertIndex, specutil.Check) + if err != nil { + return nil, err + } + if err := convertPartition(spec.Extra, t); err != nil { + return nil, err + } + return t, nil +} + +// convertPartition converts and appends the partition block into the table attributes if exists. 
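+// A block of the assumed shape
+//
+//	partition {
+//	  type    = RANGE
+//	  columns = [column.id]
+//	}
+//
+// becomes a &Partition{T: "RANGE", ...} table attribute (a sketch; the
+// attribute names follow the spec struct below).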
+func convertPartition(spec schemahcl.Resource, table *schema.Table) error { + r, ok := spec.Resource("partition") + if !ok { + return nil + } + var p struct { + Type string `spec:"type"` + Columns []*schemahcl.Ref `spec:"columns"` + Parts []*struct { + Expr string `spec:"expr"` + Column *schemahcl.Ref `spec:"column"` + } `spec:"by"` + } + if err := r.As(&p); err != nil { + return fmt.Errorf("parsing %s.partition: %w", table.Name, err) + } + if p.Type == "" { + return fmt.Errorf("missing attribute %s.partition.type", table.Name) + } + key := &Partition{T: p.Type} + switch n, m := len(p.Columns), len(p.Parts); { + case n == 0 && m == 0: + return fmt.Errorf("missing columns or expressions for %s.partition", table.Name) + case n > 0 && m > 0: + return fmt.Errorf(`multiple definitions for %s.partition, use "columns" or "by"`, table.Name) + case n > 0: + for _, r := range p.Columns { + c, err := specutil.ColumnByRef(table, r) + if err != nil { + return err + } + key.Parts = append(key.Parts, &PartitionPart{C: c}) + } + case m > 0: + for i, p := range p.Parts { + switch { + case p.Column == nil && p.Expr == "": + return fmt.Errorf("missing column or expression for %s.partition.by at position %d", table.Name, i) + case p.Column != nil && p.Expr != "": + return fmt.Errorf("multiple definitions for %s.partition.by at position %d", table.Name, i) + case p.Column != nil: + c, err := specutil.ColumnByRef(table, p.Column) + if err != nil { + return err + } + key.Parts = append(key.Parts, &PartitionPart{C: c}) + case p.Expr != "": + key.Parts = append(key.Parts, &PartitionPart{X: &schema.RawExpr{X: p.Expr}}) + } + } + } + table.AddAttrs(key) + return nil +} + +// fromPartition returns the resource spec for representing the partition block. +func fromPartition(p Partition) *schemahcl.Resource { + key := &schemahcl.Resource{ + Type: "partition", + Attrs: []*schemahcl.Attr{ + specutil.VarAttr("type", strings.ToUpper(specutil.Var(p.T))), + }, + } + columns, ok := func() ([]*schemahcl.Ref, bool) { + parts := make([]*schemahcl.Ref, 0, len(p.Parts)) + for _, p := range p.Parts { + if p.C == nil { + return nil, false + } + parts = append(parts, specutil.ColumnRef(p.C.Name)) + } + return parts, true + }() + if ok { + key.Attrs = append(key.Attrs, schemahcl.RefsAttr("columns", columns...)) + return key + } + for _, p := range p.Parts { + part := &schemahcl.Resource{Type: "by"} + switch { + case p.C != nil: + part.Attrs = append(part.Attrs, schemahcl.RefAttr("column", specutil.ColumnRef(p.C.Name))) + case p.X != nil: + part.Attrs = append(part.Attrs, schemahcl.StringAttr("expr", p.X.(*schema.RawExpr).X)) + } + key.Children = append(key.Children, part) + } + return key +} + +// convertColumn converts a sqlspec.Column into a schema.Column. 
+func convertColumn(spec *sqlspec.Column, _ *schema.Table) (*schema.Column, error) { + if err := fixDefaultQuotes(spec); err != nil { + return nil, err + } + c, err := specutil.Column(spec, convertColumnType) + if err != nil { + return nil, err + } + if r, ok := spec.Extra.Resource("identity"); ok { + id, err := convertIdentity(r) + if err != nil { + return nil, err + } + c.Attrs = append(c.Attrs, id) + } + if err := specutil.ConvertGenExpr(spec.Remain(), c, generatedType); err != nil { + return nil, err + } + return c, nil +} + +func convertIdentity(r *schemahcl.Resource) (*Identity, error) { + var spec struct { + Generation string `spec:"generated"` + Start int64 `spec:"start"` + Increment int64 `spec:"increment"` + } + if err := r.As(&spec); err != nil { + return nil, err + } + id := &Identity{Generation: specutil.FromVar(spec.Generation), Sequence: &Sequence{}} + if spec.Start != 0 { + id.Sequence.Start = spec.Start + } + if spec.Increment != 0 { + id.Sequence.Increment = spec.Increment + } + return id, nil +} + +// fixDefaultQuotes fixes the quotes on the Default field to be single quotes +// instead of double quotes. +func fixDefaultQuotes(spec *sqlspec.Column) error { + if spec.Default.Type() != cty.String { + return nil + } + if s := spec.Default.AsString(); sqlx.IsQuoted(s, '"') { + uq, err := strconv.Unquote(s) + if err != nil { + return err + } + s = "'" + uq + "'" + spec.Default = cty.StringVal(s) + } + return nil +} + +// convertPK converts a sqlspec.PrimaryKey into a schema.Index. +func convertPK(spec *sqlspec.PrimaryKey, parent *schema.Table) (*schema.Index, error) { + idx, err := specutil.PrimaryKey(spec, parent) + if err != nil { + return nil, err + } + if err := convertIndexPK(spec, parent, idx); err != nil { + return nil, err + } + return idx, nil +} + +// convertIndex converts a sqlspec.Index into a schema.Index. +func convertIndex(spec *sqlspec.Index, t *schema.Table) (*schema.Index, error) { + idx, err := specutil.Index(spec, t, convertPart) + if err != nil { + return nil, err + } + if attr, ok := spec.Attr("type"); ok { + t, err := attr.String() + if err != nil { + return nil, err + } + idx.Attrs = append(idx.Attrs, &IndexType{T: strings.ToUpper(t)}) + } + if attr, ok := spec.Attr("where"); ok { + p, err := attr.String() + if err != nil { + return nil, err + } + idx.Attrs = append(idx.Attrs, &IndexPredicate{P: p}) + } + if err := convertIndexPK(spec, t, idx); err != nil { + return nil, err + } + return idx, nil +} + +// convertIndexPK converts the index parameters shared between primary and secondary indexes. 
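+// e.g., both the primary-key and index specs may carry the page_per_range
+// (BRIN storage parameter) and include (covering columns) attributes read
+// below (illustrative; the attribute names are those used in this function).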
+func convertIndexPK(spec specutil.Attrer, t *schema.Table, idx *schema.Index) error { + if attr, ok := spec.Attr("page_per_range"); ok { + p, err := attr.Int64() + if err != nil { + return err + } + idx.Attrs = append(idx.Attrs, &IndexStorageParams{PagesPerRange: p}) + } + if attr, ok := spec.Attr("include"); ok { + refs, err := attr.Refs() + if err != nil { + return err + } + if len(refs) == 0 { + return fmt.Errorf("unexpected empty INCLUDE in index %q definition", idx.Name) + } + include := make([]*schema.Column, len(refs)) + for i, r := range refs { + if include[i], err = specutil.ColumnByRef(t, r); err != nil { + return err + } + } + idx.Attrs = append(idx.Attrs, &IndexInclude{Columns: include}) + } + return nil +} + +func convertPart(spec *sqlspec.IndexPart, part *schema.IndexPart) error { + switch opc, ok := spec.Attr("ops"); { + case !ok: + case opc.IsRawExpr(): + expr, err := opc.RawExpr() + if err != nil { + return err + } + var op IndexOpClass + if err := op.UnmarshalText([]byte(expr.X)); err != nil { + return fmt.Errorf("unexpected index.on.ops expression %q: %w", expr.X, err) + } + if op.Name != "" { + part.Attrs = append(part.Attrs, &op) + } + case opc.IsRef(): + name, err := opc.Ref() + if err != nil { + return err + } + part.Attrs = append(part.Attrs, &IndexOpClass{Name: name}) + default: + name, err := opc.String() + if err != nil { + return err + } + part.Attrs = append(part.Attrs, &IndexOpClass{Name: name}) + } + return nil +} + +const defaultTimePrecision = 6 + +// convertColumnType converts a sqlspec.Column into a concrete Postgres schema.Type. +func convertColumnType(spec *sqlspec.Column) (schema.Type, error) { + typ, err := TypeRegistry.Type(spec.Type, spec.Extra.Attrs) + if err != nil { + return nil, err + } + // Handle default values for time precision types. + if t, ok := typ.(*schema.TimeType); ok && strings.HasPrefix(t.T, "time") { + if _, ok := attr(spec.Type, "precision"); !ok { + p := defaultTimePrecision + t.Precision = &p + } + } + return typ, nil +} + +// convertEnums converts possibly referenced column types (like enums) to +// an actual schema.Type and sets it on the correct schema.Column. 
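+// e.g., a column whose type is the reference "$enum.status" (see enumRef and
+// enumName below) is resolved to &schema.EnumType{T: "status", Values: ...}
+// taken from the matching enum block (illustrative name).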
+func convertEnums(tables []*sqlspec.Table, enums []*Enum, r *schema.Realm) error {
+	var (
+		used   = make(map[*Enum]struct{})
+		byName = make(map[string]*Enum)
+	)
+	for _, e := range enums {
+		byName[e.Name] = e
+	}
+	for _, t := range tables {
+		for _, c := range t.Columns {
+			var enum *Enum
+			switch {
+			case c.Type.IsRef:
+				n, err := enumName(c.Type)
+				if err != nil {
+					return err
+				}
+				e, ok := byName[n]
+				if !ok {
+					return fmt.Errorf("enum %q was not found", n)
+				}
+				enum = e
+			default:
+				n, ok := arrayType(c.Type.T)
+				if !ok || byName[n] == nil {
+					continue
+				}
+				enum = byName[n]
+			}
+			used[enum] = struct{}{}
+			schemaE, err := specutil.SchemaName(enum.Schema)
+			if err != nil {
+				return fmt.Errorf("extract schema name from enum reference: %w", err)
+			}
+			es, ok := r.Schema(schemaE)
+			if !ok {
+				return fmt.Errorf("schema %q not found in realm for table %q", schemaE, t.Name)
+			}
+			schemaT, err := specutil.SchemaName(t.Schema)
+			if err != nil {
+				return fmt.Errorf("extract schema name from table reference: %w", err)
+			}
+			ts, ok := r.Schema(schemaT)
+			if !ok {
+				return fmt.Errorf("schema %q not found in realm for table %q", schemaT, t.Name)
+			}
+			tt, ok := ts.Table(t.Name)
+			if !ok {
+				return fmt.Errorf("table %q not found in schema %q", t.Name, ts.Name)
+			}
+			cc, ok := tt.Column(c.Name)
+			if !ok {
+				return fmt.Errorf("column %q not found in table %q", c.Name, t.Name)
+			}
+			e := &schema.EnumType{T: enum.Name, Schema: es, Values: enum.Values}
+			switch t := cc.Type.Type.(type) {
+			case *ArrayType:
+				t.Type = e
+			default:
+				cc.Type.Type = e
+			}
+		}
+	}
+	for _, e := range enums {
+		if _, ok := used[e]; !ok {
+			return fmt.Errorf("enum %q declared but not used", e.Name)
+		}
+	}
+	return nil
+}
+
+// enumName extracts the name of the referenced Enum from the reference string.
+func enumName(ref *schemahcl.Type) (string, error) {
+	s := strings.Split(ref.T, "$enum.")
+	if len(s) != 2 {
+		return "", fmt.Errorf("postgres: failed to extract enum name from %q", ref.T)
+	}
+	return s[1], nil
+}
+
+// enumRef returns a reference string to the given enum name.
+func enumRef(n string) *schemahcl.Ref {
+	return &schemahcl.Ref{
+		V: "$enum." + n,
+	}
+}
+
+// schemaSpec converts from a concrete Postgres schema to Atlas specification.
+func schemaSpec(schem *schema.Schema) (*doc, error) {
+	s, tbls, err := specutil.FromSchema(schem, tableSpec)
+	if err != nil {
+		return nil, err
+	}
+	d := &doc{
+		Tables:  tbls,
+		Schemas: []*sqlspec.Schema{s},
+	}
+	enums := make(map[string]bool)
+	for _, t := range schem.Tables {
+		for _, c := range t.Columns {
+			if e, ok := hasEnumType(c); ok && !enums[e.T] {
+				d.Enums = append(d.Enums, &Enum{
+					Name:   e.T,
+					Schema: specutil.SchemaRef(s.Name),
+					Values: e.Values,
+				})
+				enums[e.T] = true
+			}
+		}
+	}
+	return d, nil
+}
+
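The enum helpers above pair up: enumRef builds the "$enum.<name>" reference string that column types use to point at a declared enum block, and enumName recovers the name on the way back. A minimal same-package sketch (editor's illustration, not part of the vendored file; it uses the unexported helpers, so it only compiles inside this package):

	// Round-trip an enum reference through enumRef and enumName.
	ref := enumRef("status") // &schemahcl.Ref{V: "$enum.status"}
	name, err := enumName(&schemahcl.Type{T: ref.V, IsRef: true})
	fmt.Println(name, err) // status <nil>

+// tableSpec converts from a concrete Postgres schema.Table to a sqlspec.Table.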
+func tableSpec(table *schema.Table) (*sqlspec.Table, error) { + spec, err := specutil.FromTable( + table, + columnSpec, + pkSpec, + indexSpec, + specutil.FromForeignKey, + specutil.FromCheck, + ) + if err != nil { + return nil, err + } + if p := (Partition{}); sqlx.Has(table.Attrs, &p) { + spec.Extra.Children = append(spec.Extra.Children, fromPartition(p)) + } + return spec, nil +} + +func pkSpec(idx *schema.Index) (*sqlspec.PrimaryKey, error) { + spec, err := specutil.FromPrimaryKey(idx) + if err != nil { + return nil, err + } + spec.Extra.Attrs = indexPKSpec(idx, spec.Extra.Attrs) + return spec, nil +} + +func indexSpec(idx *schema.Index) (*sqlspec.Index, error) { + spec, err := specutil.FromIndex(idx, partAttr) + if err != nil { + return nil, err + } + // Avoid printing the index type if it is the default. + if i := (IndexType{}); sqlx.Has(idx.Attrs, &i) && strings.ToUpper(i.T) != IndexTypeBTree { + spec.Extra.Attrs = append(spec.Extra.Attrs, specutil.VarAttr("type", strings.ToUpper(i.T))) + } + if i := (IndexPredicate{}); sqlx.Has(idx.Attrs, &i) && i.P != "" { + spec.Extra.Attrs = append(spec.Extra.Attrs, specutil.VarAttr("where", strconv.Quote(i.P))) + } + spec.Extra.Attrs = indexPKSpec(idx, spec.Extra.Attrs) + return spec, nil +} + +func indexPKSpec(idx *schema.Index, attrs []*schemahcl.Attr) []*schemahcl.Attr { + if i := (IndexInclude{}); sqlx.Has(idx.Attrs, &i) && len(i.Columns) > 0 { + refs := make([]*schemahcl.Ref, 0, len(i.Columns)) + for _, c := range i.Columns { + refs = append(refs, specutil.ColumnRef(c.Name)) + } + attrs = append(attrs, schemahcl.RefsAttr("include", refs...)) + } + if p, ok := indexStorageParams(idx.Attrs); ok { + attrs = append(attrs, schemahcl.Int64Attr("page_per_range", p.PagesPerRange)) + } + return attrs +} + +func partAttr(idx *schema.Index, part *schema.IndexPart, spec *sqlspec.IndexPart) error { + var op IndexOpClass + if !sqlx.Has(part.Attrs, &op) { + return nil + } + switch d, err := op.DefaultFor(idx, part); { + case err != nil: + return err + case d: + case len(op.Params) > 0: + spec.Extra.Attrs = append(spec.Extra.Attrs, schemahcl.RawAttr("ops", op.String())) + default: + spec.Extra.Attrs = append(spec.Extra.Attrs, specutil.VarAttr("ops", op.String())) + } + return nil +} + +// columnSpec converts from a concrete Postgres schema.Column into a sqlspec.Column. +func columnSpec(c *schema.Column, _ *schema.Table) (*sqlspec.Column, error) { + s, err := specutil.FromColumn(c, columnTypeSpec) + if err != nil { + return nil, err + } + if i := (&Identity{}); sqlx.Has(c.Attrs, i) { + s.Extra.Children = append(s.Extra.Children, fromIdentity(i)) + } + if x := (schema.GeneratedExpr{}); sqlx.Has(c.Attrs, &x) { + s.Extra.Children = append(s.Extra.Children, specutil.FromGenExpr(x, generatedType)) + } + return s, nil +} + +// fromIdentity returns the resource spec for representing the identity attributes. +func fromIdentity(i *Identity) *schemahcl.Resource { + id := &schemahcl.Resource{ + Type: "identity", + Attrs: []*schemahcl.Attr{ + specutil.VarAttr("generated", strings.ToUpper(specutil.Var(i.Generation))), + }, + } + if s := i.Sequence; s != nil { + if s.Start != 1 { + id.Attrs = append(id.Attrs, schemahcl.Int64Attr("start", s.Start)) + } + if s.Increment != 1 { + id.Attrs = append(id.Attrs, schemahcl.Int64Attr("increment", s.Increment)) + } + } + return id +} + +// columnTypeSpec converts from a concrete Postgres schema.Type into sqlspec.Column Type. +func columnTypeSpec(t schema.Type) (*sqlspec.Column, error) { + // Handle postgres enum types. 
They cannot be put into the TypeRegistry since their name is dynamic. + if e, ok := t.(*schema.EnumType); ok { + return &sqlspec.Column{Type: &schemahcl.Type{ + T: enumRef(e.T).V, + IsRef: true, + }}, nil + } + st, err := TypeRegistry.Convert(t) + if err != nil { + return nil, err + } + return &sqlspec.Column{Type: st}, nil +} + +// TypeRegistry contains the supported TypeSpecs for the Postgres driver. +var TypeRegistry = schemahcl.NewRegistry( + schemahcl.WithSpecFunc(typeSpec), + schemahcl.WithParser(ParseType), + schemahcl.WithSpecs( + schemahcl.NewTypeSpec(TypeBit, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "len", Kind: reflect.Int64})), + schemahcl.AliasTypeSpec("bit_varying", TypeBitVar, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "len", Kind: reflect.Int64})), + schemahcl.NewTypeSpec(TypeVarChar, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.AliasTypeSpec("character_varying", TypeCharVar, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeChar, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeCharacter, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec(TypeInt2), + schemahcl.NewTypeSpec(TypeInt4), + schemahcl.NewTypeSpec(TypeInt8), + schemahcl.NewTypeSpec(TypeInt), + schemahcl.NewTypeSpec(TypeInteger), + schemahcl.NewTypeSpec(TypeSmallInt), + schemahcl.NewTypeSpec(TypeBigInt), + schemahcl.NewTypeSpec(TypeText), + schemahcl.NewTypeSpec(TypeBoolean), + schemahcl.NewTypeSpec(TypeBool), + schemahcl.NewTypeSpec(TypeBytea), + schemahcl.NewTypeSpec(TypeCIDR), + schemahcl.NewTypeSpec(TypeInet), + schemahcl.NewTypeSpec(TypeMACAddr), + schemahcl.NewTypeSpec(TypeMACAddr8), + schemahcl.NewTypeSpec(TypeCircle), + schemahcl.NewTypeSpec(TypeLine), + schemahcl.NewTypeSpec(TypeLseg), + schemahcl.NewTypeSpec(TypeBox), + schemahcl.NewTypeSpec(TypePath), + schemahcl.NewTypeSpec(TypePoint), + schemahcl.NewTypeSpec(TypePolygon), + schemahcl.NewTypeSpec(TypeDate), + schemahcl.NewTypeSpec(TypeTime, schemahcl.WithAttributes(precisionTypeAttr()), formatTime()), + schemahcl.NewTypeSpec(TypeTimeTZ, schemahcl.WithAttributes(precisionTypeAttr()), formatTime()), + schemahcl.NewTypeSpec(TypeTimestampTZ, schemahcl.WithAttributes(precisionTypeAttr()), formatTime()), + schemahcl.NewTypeSpec(TypeTimestamp, schemahcl.WithAttributes(precisionTypeAttr()), formatTime()), + schemahcl.AliasTypeSpec("double_precision", TypeDouble), + schemahcl.NewTypeSpec(TypeReal), + schemahcl.NewTypeSpec(TypeFloat, schemahcl.WithAttributes(precisionTypeAttr())), + schemahcl.NewTypeSpec(TypeFloat8), + schemahcl.NewTypeSpec(TypeFloat4), + schemahcl.NewTypeSpec(TypeNumeric, schemahcl.WithAttributes(precisionTypeAttr(), &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeDecimal, schemahcl.WithAttributes(precisionTypeAttr(), &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec(TypeSmallSerial), + schemahcl.NewTypeSpec(TypeSerial), + schemahcl.NewTypeSpec(TypeBigSerial), + schemahcl.NewTypeSpec(TypeSerial2), + schemahcl.NewTypeSpec(TypeSerial4), + schemahcl.NewTypeSpec(TypeSerial8), + schemahcl.NewTypeSpec(TypeXML), + schemahcl.NewTypeSpec(TypeJSON), + schemahcl.NewTypeSpec(TypeJSONB), + schemahcl.NewTypeSpec(TypeUUID), + schemahcl.NewTypeSpec(TypeMoney), + schemahcl.NewTypeSpec(TypeTSVector), + schemahcl.NewTypeSpec(TypeTSQuery), + schemahcl.NewTypeSpec(TypeInt4Range), + 
schemahcl.NewTypeSpec(TypeInt4MultiRange), + schemahcl.NewTypeSpec(TypeInt8Range), + schemahcl.NewTypeSpec(TypeInt8MultiRange), + schemahcl.NewTypeSpec(TypeNumRange), + schemahcl.NewTypeSpec(TypeNumMultiRange), + schemahcl.NewTypeSpec(TypeTSRange), + schemahcl.NewTypeSpec(TypeTSMultiRange), + schemahcl.NewTypeSpec(TypeTSTZRange), + schemahcl.NewTypeSpec(TypeTSTZMultiRange), + schemahcl.NewTypeSpec(TypeDateRange), + schemahcl.NewTypeSpec(TypeDateMultiRange), + schemahcl.NewTypeSpec("hstore"), + schemahcl.NewTypeSpec("sql", schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "def", Required: true, Kind: reflect.String})), + ), + // PostgreSQL internal and special types. + schemahcl.WithSpecs(func() (specs []*schemahcl.TypeSpec) { + for _, t := range []string{ + typeOID, typeRegClass, typeRegCollation, typeRegConfig, typeRegDictionary, typeRegNamespace, + typeName, typeRegOper, typeRegOperator, typeRegProc, typeRegProcedure, typeRegRole, typeRegType, + } { + specs = append(specs, schemahcl.NewTypeSpec(t)) + } + return specs + }()...), + schemahcl.WithSpecs(func() (specs []*schemahcl.TypeSpec) { + opts := []schemahcl.TypeSpecOption{ + schemahcl.WithToSpec(func(t schema.Type) (*schemahcl.Type, error) { + i, ok := t.(*IntervalType) + if !ok { + return nil, fmt.Errorf("postgres: unexpected interval type %T", t) + } + spec := &schemahcl.Type{T: TypeInterval} + if i.F != "" { + spec.T = specutil.Var(strings.ToLower(i.F)) + } + if p := i.Precision; p != nil && *p != defaultTimePrecision { + spec.Attrs = []*schemahcl.Attr{schemahcl.IntAttr("precision", *p)} + } + return spec, nil + }), + schemahcl.WithFromSpec(func(t *schemahcl.Type) (schema.Type, error) { + i := &IntervalType{T: TypeInterval} + if t.T != TypeInterval { + i.F = specutil.FromVar(t.T) + } + if a, ok := attr(t, "precision"); ok { + p, err := a.Int() + if err != nil { + return nil, fmt.Errorf(`postgres: parsing attribute "precision": %w`, err) + } + if p != defaultTimePrecision { + i.Precision = &p + } + } + return i, nil + }), + } + for _, f := range []string{"interval", "second", "day to second", "hour to second", "minute to second"} { + specs = append(specs, schemahcl.NewTypeSpec(specutil.Var(f), append(opts, schemahcl.WithAttributes(precisionTypeAttr()))...)) + } + for _, f := range []string{"year", "month", "day", "hour", "minute", "year to month", "day to hour", "day to minute", "hour to minute"} { + specs = append(specs, schemahcl.NewTypeSpec(specutil.Var(f), opts...)) + } + return specs + }()...), +) + +func precisionTypeAttr() *schemahcl.TypeAttr { + return &schemahcl.TypeAttr{ + Name: "precision", + Kind: reflect.Int, + Required: false, + } +} + +func attr(typ *schemahcl.Type, key string) (*schemahcl.Attr, bool) { + for _, a := range typ.Attrs { + if a.K == key { + return a, true + } + } + return nil, false +} + +func typeSpec(t schema.Type) (*schemahcl.Type, error) { + if t, ok := t.(*schema.TimeType); ok && t.T != TypeDate { + spec := &schemahcl.Type{T: timeAlias(t.T)} + if p := t.Precision; p != nil && *p != defaultTimePrecision { + spec.Attrs = []*schemahcl.Attr{schemahcl.IntAttr("precision", *p)} + } + return spec, nil + } + s, err := FormatType(t) + if err != nil { + return nil, err + } + return &schemahcl.Type{T: s}, nil +} + +// formatTime overrides the default printing logic done by schemahcl.hclType. 
+func formatTime() schemahcl.TypeSpecOption { + return schemahcl.WithTypeFormatter(func(t *schemahcl.Type) (string, error) { + a, ok := attr(t, "precision") + if !ok { + return t.T, nil + } + p, err := a.Int() + if err != nil { + return "", fmt.Errorf(`postgres: parsing attribute "precision": %w`, err) + } + return FormatType(&schema.TimeType{T: t.T, Precision: &p}) + }) +} + +// generatedType returns the default and only type for a generated column. +func generatedType(string) string { return "STORED" } diff --git a/vendor/ariga.io/atlas/sql/schema/BUILD b/vendor/ariga.io/atlas/sql/schema/BUILD new file mode 100644 index 00000000..259efb5f --- /dev/null +++ b/vendor/ariga.io/atlas/sql/schema/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "schema", + srcs = [ + "changekind_string.go", + "dsl.go", + "inspect.go", + "migrate.go", + "schema.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/schema", + importpath = "ariga.io/atlas/sql/schema", + visibility = ["//visibility:public"], +) diff --git a/vendor/ariga.io/atlas/sql/schema/changekind_string.go b/vendor/ariga.io/atlas/sql/schema/changekind_string.go new file mode 100644 index 00000000..eec1ef9b --- /dev/null +++ b/vendor/ariga.io/atlas/sql/schema/changekind_string.go @@ -0,0 +1,55 @@ +// Code generated by "stringer -type ChangeKind"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoChange-0] + _ = x[ChangeAttr-1] + _ = x[ChangeCharset-2] + _ = x[ChangeCollate-4] + _ = x[ChangeComment-8] + _ = x[ChangeNull-16] + _ = x[ChangeType-32] + _ = x[ChangeDefault-64] + _ = x[ChangeGenerated-128] + _ = x[ChangeUnique-256] + _ = x[ChangeParts-512] + _ = x[ChangeColumn-1024] + _ = x[ChangeRefColumn-2048] + _ = x[ChangeRefTable-4096] + _ = x[ChangeUpdateAction-8192] + _ = x[ChangeDeleteAction-16384] +} + +const _ChangeKind_name = "NoChangeChangeAttrChangeCharsetChangeCollateChangeCommentChangeNullChangeTypeChangeDefaultChangeGeneratedChangeUniqueChangePartsChangeColumnChangeRefColumnChangeRefTableChangeUpdateActionChangeDeleteAction" + +var _ChangeKind_map = map[ChangeKind]string{ + 0: _ChangeKind_name[0:8], + 1: _ChangeKind_name[8:18], + 2: _ChangeKind_name[18:31], + 4: _ChangeKind_name[31:44], + 8: _ChangeKind_name[44:57], + 16: _ChangeKind_name[57:67], + 32: _ChangeKind_name[67:77], + 64: _ChangeKind_name[77:90], + 128: _ChangeKind_name[90:105], + 256: _ChangeKind_name[105:117], + 512: _ChangeKind_name[117:128], + 1024: _ChangeKind_name[128:140], + 2048: _ChangeKind_name[140:155], + 4096: _ChangeKind_name[155:169], + 8192: _ChangeKind_name[169:187], + 16384: _ChangeKind_name[187:205], +} + +func (i ChangeKind) String() string { + if str, ok := _ChangeKind_map[i]; ok { + return str + } + return "ChangeKind(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/ariga.io/atlas/sql/schema/dsl.go b/vendor/ariga.io/atlas/sql/schema/dsl.go new file mode 100644 index 00000000..24399ce5 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/schema/dsl.go @@ -0,0 +1,774 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. 
+
+package schema
+
+import (
+	"reflect"
+)
+
+// The functions and methods below provide a DSL for creating schema resources using
+// a fluent interface. Note that some methods create links between the schema elements.
+
+// New creates a new Schema.
+func New(name string) *Schema {
+	return &Schema{Name: name}
+}
+
+// SetCharset sets or appends the Charset attribute
+// to the schema with the given value.
+func (s *Schema) SetCharset(v string) *Schema {
+	ReplaceOrAppend(&s.Attrs, &Charset{V: v})
+	return s
+}
+
+// UnsetCharset unsets the Charset attribute.
+func (s *Schema) UnsetCharset() *Schema {
+	del(&s.Attrs, &Charset{})
+	return s
+}
+
+// SetCollation sets or appends the Collation attribute
+// to the schema with the given value.
+func (s *Schema) SetCollation(v string) *Schema {
+	ReplaceOrAppend(&s.Attrs, &Collation{V: v})
+	return s
+}
+
+// UnsetCollation unsets the Collation attribute.
+func (s *Schema) UnsetCollation() *Schema {
+	del(&s.Attrs, &Collation{})
+	return s
+}
+
+// SetComment sets or appends the Comment attribute
+// to the schema with the given value.
+func (s *Schema) SetComment(v string) *Schema {
+	ReplaceOrAppend(&s.Attrs, &Comment{Text: v})
+	return s
+}
+
+// AddAttrs adds additional attributes to the schema.
+func (s *Schema) AddAttrs(attrs ...Attr) *Schema {
+	s.Attrs = append(s.Attrs, attrs...)
+	return s
+}
+
+// SetRealm sets the database/realm of the schema.
+func (s *Schema) SetRealm(r *Realm) *Schema {
+	s.Realm = r
+	return s
+}
+
+// AddTables adds and links the given tables to the schema.
+func (s *Schema) AddTables(tables ...*Table) *Schema {
+	for _, t := range tables {
+		t.SetSchema(s)
+	}
+	s.Tables = append(s.Tables, tables...)
+	return s
+}
+
+// NewRealm creates a new Realm.
+func NewRealm(schemas ...*Schema) *Realm {
+	r := &Realm{Schemas: schemas}
+	for _, s := range schemas {
+		s.Realm = r
+	}
+	return r
+}
+
+// AddSchemas adds and links the given schemas to the realm.
+func (r *Realm) AddSchemas(schemas ...*Schema) *Realm {
+	for _, s := range schemas {
+		s.SetRealm(r)
+	}
+	r.Schemas = append(r.Schemas, schemas...)
+	return r
+}
+
+// SetCharset sets or appends the Charset attribute
+// to the realm with the given value.
+func (r *Realm) SetCharset(v string) *Realm {
+	ReplaceOrAppend(&r.Attrs, &Charset{V: v})
+	return r
+}
+
+// UnsetCharset unsets the Charset attribute.
+func (r *Realm) UnsetCharset() *Realm {
+	del(&r.Attrs, &Charset{})
+	return r
+}
+
+// SetCollation sets or appends the Collation attribute
+// to the realm with the given value.
+func (r *Realm) SetCollation(v string) *Realm {
+	ReplaceOrAppend(&r.Attrs, &Collation{V: v})
+	return r
+}
+
+// UnsetCollation unsets the Collation attribute.
+func (r *Realm) UnsetCollation() *Realm {
+	del(&r.Attrs, &Collation{})
+	return r
+}
+
+// NewTable creates a new Table.
+func NewTable(name string) *Table {
+	return &Table{Name: name}
+}
+
+// SetCharset sets or appends the Charset attribute
+// to the table with the given value.
+func (t *Table) SetCharset(v string) *Table {
+	ReplaceOrAppend(&t.Attrs, &Charset{V: v})
+	return t
+}
+
+// UnsetCharset unsets the Charset attribute.
+func (t *Table) UnsetCharset() *Table {
+	del(&t.Attrs, &Charset{})
+	return t
+}
+
+// SetCollation sets or appends the Collation attribute
+// to the table with the given value.
+func (t *Table) SetCollation(v string) *Table {
+	ReplaceOrAppend(&t.Attrs, &Collation{V: v})
+	return t
+}
+
+// UnsetCollation unsets the Collation attribute.
+func (t *Table) UnsetCollation() *Table {
+	del(&t.Attrs, &Collation{})
+	return t
+}
+
+// SetComment sets or appends the Comment attribute
+// to the table with the given value.
+func (t *Table) SetComment(v string) *Table {
+	ReplaceOrAppend(&t.Attrs, &Comment{Text: v})
+	return t
+}
+
+// AddChecks appends the given checks to the attribute list.
+func (t *Table) AddChecks(checks ...*Check) *Table {
+	for _, c := range checks {
+		t.Attrs = append(t.Attrs, c)
+	}
+	return t
+}
+
+// SetSchema sets the schema (named-database) of the table.
+func (t *Table) SetSchema(s *Schema) *Table {
+	t.Schema = s
+	return t
+}
+
+// SetPrimaryKey sets the primary-key of the table.
+func (t *Table) SetPrimaryKey(pk *Index) *Table {
+	pk.Table = t
+	t.PrimaryKey = pk
+	for _, p := range pk.Parts {
+		if p.C == nil {
+			continue
+		}
+		if _, ok := t.Column(p.C.Name); !ok {
+			t.AddColumns(p.C)
+		}
+	}
+	return t
+}
+
+// AddColumns appends the given columns to the table column list.
+func (t *Table) AddColumns(columns ...*Column) *Table {
+	t.Columns = append(t.Columns, columns...)
+	return t
+}
+
+// AddIndexes appends the given indexes to the table index list.
+func (t *Table) AddIndexes(indexes ...*Index) *Table {
+	for _, idx := range indexes {
+		idx.Table = t
+	}
+	t.Indexes = append(t.Indexes, indexes...)
+	return t
+}
+
+// AddForeignKeys appends the given foreign-keys to the table foreign-key list.
+func (t *Table) AddForeignKeys(fks ...*ForeignKey) *Table {
+	for _, fk := range fks {
+		fk.Table = t
+	}
+	t.ForeignKeys = append(t.ForeignKeys, fks...)
+	return t
+}
+
+// AddAttrs adds additional attributes to the table.
+func (t *Table) AddAttrs(attrs ...Attr) *Table {
+	t.Attrs = append(t.Attrs, attrs...)
+	return t
+}
+
+// NewColumn creates a new column with the given name.
+func NewColumn(name string) *Column {
+	return &Column{Name: name}
+}
+
+// NewNullColumn creates a new nullable column with the given name.
+func NewNullColumn(name string) *Column {
+	return NewColumn(name).
+		SetNull(true)
+}
+
+// NewBoolColumn creates a new BoolType column.
+func NewBoolColumn(name, typ string) *Column {
+	return NewColumn(name).
+		SetType(&BoolType{T: typ})
+}
+
+// NewNullBoolColumn creates a new nullable BoolType column.
+func NewNullBoolColumn(name, typ string) *Column {
+	return NewBoolColumn(name, typ).
+		SetNull(true)
+}
+
+// NewIntColumn creates a new IntegerType column.
+func NewIntColumn(name, typ string) *Column {
+	return NewColumn(name).
+		SetType(&IntegerType{T: typ})
+}
+
+// NewNullIntColumn creates a new nullable IntegerType column.
+func NewNullIntColumn(name, typ string) *Column {
+	return NewIntColumn(name, typ).
+		SetNull(true)
+}
+
+// NewUintColumn creates a new unsigned IntegerType column.
+func NewUintColumn(name, typ string) *Column {
+	return NewColumn(name).
+		SetType(&IntegerType{T: typ, Unsigned: true})
+}
+
+// NewNullUintColumn creates a new nullable unsigned IntegerType column.
+func NewNullUintColumn(name, typ string) *Column {
+	return NewUintColumn(name, typ).
+		SetNull(true)
+}
+
+// EnumOption allows configuring EnumType using functional options.
+type EnumOption func(*EnumType)
+
+// EnumName configures the name of the enum. This option
+// is useful for databases like PostgreSQL that support
+// user-defined types for enums.
+func EnumName(name string) EnumOption {
+	return func(e *EnumType) {
+		e.T = name
+	}
+}
+
+// EnumValues configures the values of the enum.
+func EnumValues(values ...string) EnumOption {
+	return func(e *EnumType) {
+		e.Values = values
+	}
+}
+
+// EnumSchema configures the schema of the enum.
+func EnumSchema(s *Schema) EnumOption {
+	return func(e *EnumType) {
+		e.Schema = s
+	}
+}
+
+// NewEnumColumn creates a new EnumType column.
+func NewEnumColumn(name string, opts ...EnumOption) *Column {
+	t := &EnumType{}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullEnumColumn creates a new nullable EnumType column.
+func NewNullEnumColumn(name string, opts ...EnumOption) *Column {
+	return NewEnumColumn(name, opts...).
+		SetNull(true)
+}
+
+// BinaryOption allows configuring BinaryType using functional options.
+type BinaryOption func(*BinaryType)
+
+// BinarySize configures the size of the binary type.
+func BinarySize(size int) BinaryOption {
+	return func(b *BinaryType) {
+		b.Size = &size
+	}
+}
+
+// NewBinaryColumn creates a new BinaryType column.
+func NewBinaryColumn(name, typ string, opts ...BinaryOption) *Column {
+	t := &BinaryType{T: typ}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullBinaryColumn creates a new nullable BinaryType column.
+func NewNullBinaryColumn(name, typ string, opts ...BinaryOption) *Column {
+	return NewBinaryColumn(name, typ, opts...).
+		SetNull(true)
+}
+
+// StringOption allows configuring StringType using functional options.
+type StringOption func(*StringType)
+
+// StringSize configures the size of the string type.
+func StringSize(size int) StringOption {
+	return func(b *StringType) {
+		b.Size = size
+	}
+}
+
+// NewStringColumn creates a new StringType column.
+func NewStringColumn(name, typ string, opts ...StringOption) *Column {
+	t := &StringType{T: typ}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullStringColumn creates a new nullable StringType column.
+func NewNullStringColumn(name, typ string, opts ...StringOption) *Column {
+	return NewStringColumn(name, typ, opts...).
+		SetNull(true)
+}
+
+// DecimalOption allows configuring DecimalType using functional options.
+type DecimalOption func(*DecimalType)
+
+// DecimalPrecision configures the precision of the decimal type.
+func DecimalPrecision(precision int) DecimalOption {
+	return func(b *DecimalType) {
+		b.Precision = precision
+	}
+}
+
+// DecimalScale configures the scale of the decimal type.
+func DecimalScale(scale int) DecimalOption {
+	return func(b *DecimalType) {
+		b.Scale = scale
+	}
+}
+
+// DecimalUnsigned configures the unsigned attribute of the decimal type.
+func DecimalUnsigned(unsigned bool) DecimalOption {
+	return func(b *DecimalType) {
+		b.Unsigned = unsigned
+	}
+}
+
+// NewDecimalColumn creates a new DecimalType column.
+func NewDecimalColumn(name, typ string, opts ...DecimalOption) *Column {
+	t := &DecimalType{T: typ}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullDecimalColumn creates a new nullable DecimalType column.
+func NewNullDecimalColumn(name, typ string, opts ...DecimalOption) *Column {
+	return NewDecimalColumn(name, typ, opts...).
+		SetNull(true)
+}
+
+// FloatOption allows configuring FloatType using functional options.
+type FloatOption func(*FloatType)
+
+// FloatPrecision configures the precision of the float type.
+func FloatPrecision(precision int) FloatOption {
+	return func(b *FloatType) {
+		b.Precision = precision
+	}
+}
+
+// FloatUnsigned configures the unsigned attribute of the float type.
+func FloatUnsigned(unsigned bool) FloatOption {
+	return func(b *FloatType) {
+		b.Unsigned = unsigned
+	}
+}
+
+// NewFloatColumn creates a new FloatType column.
+func NewFloatColumn(name, typ string, opts ...FloatOption) *Column {
+	t := &FloatType{T: typ}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullFloatColumn creates a new nullable FloatType column.
+func NewNullFloatColumn(name, typ string, opts ...FloatOption) *Column {
+	return NewFloatColumn(name, typ, opts...).
+		SetNull(true)
+}
+
+// TimeOption allows configuring TimeType using functional options.
+type TimeOption func(*TimeType)
+
+// TimePrecision configures the precision of the time type.
+func TimePrecision(precision int) TimeOption {
+	return func(b *TimeType) {
+		b.Precision = &precision
+	}
+}
+
+// NewTimeColumn creates a new TimeType column.
+func NewTimeColumn(name, typ string, opts ...TimeOption) *Column {
+	t := &TimeType{T: typ}
+	for _, opt := range opts {
+		opt(t)
+	}
+	return NewColumn(name).SetType(t)
+}
+
+// NewNullTimeColumn creates a new nullable TimeType column.
+func NewNullTimeColumn(name, typ string) *Column {
+	return NewTimeColumn(name, typ).
+		SetNull(true)
+}
+
+// NewJSONColumn creates a new JSONType column.
+func NewJSONColumn(name, typ string) *Column {
+	return NewColumn(name).
+		SetType(&JSONType{T: typ})
+}
+
+// NewNullJSONColumn creates a new nullable JSONType column.
+func NewNullJSONColumn(name, typ string) *Column {
+	return NewJSONColumn(name, typ).
+		SetNull(true)
+}
+
+// NewSpatialColumn creates a new SpatialType column.
+func NewSpatialColumn(name, typ string) *Column {
+	return NewColumn(name).
+		SetType(&SpatialType{T: typ})
+}
+
+// NewNullSpatialColumn creates a new nullable SpatialType column.
+func NewNullSpatialColumn(name, typ string) *Column {
+	return NewSpatialColumn(name, typ).
+		SetNull(true)
+}
+
+// SetNull configures the nullability of the column.
+func (c *Column) SetNull(b bool) *Column {
+	if c.Type == nil {
+		c.Type = &ColumnType{}
+	}
+	c.Type.Null = b
+	return c
+}
+
+// SetType configures the type of the column.
+func (c *Column) SetType(t Type) *Column {
+	if c.Type == nil {
+		c.Type = &ColumnType{}
+	}
+	c.Type.Type = t
+	return c
+}
+
+// SetDefault configures the default of the column.
+func (c *Column) SetDefault(x Expr) *Column {
+	c.Default = x
+	return c
+}
+
+// SetCharset sets or appends the Charset attribute
+// to the column with the given value.
+func (c *Column) SetCharset(v string) *Column {
+	ReplaceOrAppend(&c.Attrs, &Charset{V: v})
+	return c
+}
+
+// UnsetCharset unsets the Charset attribute.
+func (c *Column) UnsetCharset() *Column {
+	del(&c.Attrs, &Charset{})
+	return c
+}
+
+// SetCollation sets or appends the Collation attribute
+// to the column with the given value.
+func (c *Column) SetCollation(v string) *Column {
+	ReplaceOrAppend(&c.Attrs, &Collation{V: v})
+	return c
+}
+
+// UnsetCollation unsets the Collation attribute.
+func (c *Column) UnsetCollation() *Column {
+	del(&c.Attrs, &Collation{})
+	return c
+}
+
+// SetComment sets or appends the Comment attribute
+// to the column with the given value.
+func (c *Column) SetComment(v string) *Column {
+	ReplaceOrAppend(&c.Attrs, &Comment{Text: v})
+	return c
+}
+
+// SetGeneratedExpr sets or appends the GeneratedExpr attribute.
+func (c *Column) SetGeneratedExpr(x *GeneratedExpr) *Column {
+	ReplaceOrAppend(&c.Attrs, x)
+	return c
+}
+
+// AddAttrs adds additional attributes to the column.
+func (c *Column) AddAttrs(attrs ...Attr) *Column {
+	c.Attrs = append(c.Attrs, attrs...)
+	return c
+}
+
+// NewCheck creates a new check.
+func NewCheck() *Check {
+	return &Check{}
+}
+
+// SetName configures the name of the check constraint.
+func (c *Check) SetName(name string) *Check {
+	c.Name = name
+	return c
+}
+
+// SetExpr configures the expression of the check constraint.
+func (c *Check) SetExpr(expr string) *Check {
+	c.Expr = expr
+	return c
+}
+
+// AddAttrs adds additional attributes to the check constraint.
+func (c *Check) AddAttrs(attrs ...Attr) *Check {
+	c.Attrs = append(c.Attrs, attrs...)
+	return c
+}
+
+// NewIndex creates a new index with the given name.
+func NewIndex(name string) *Index {
+	return &Index{Name: name}
+}
+
+// NewUniqueIndex creates a new unique index with the given name.
+func NewUniqueIndex(name string) *Index {
+	return NewIndex(name).SetUnique(true)
+}
+
+// NewPrimaryKey creates a new primary-key index
+// for the given columns.
+func NewPrimaryKey(columns ...*Column) *Index {
+	return new(Index).SetUnique(true).AddColumns(columns...)
+}
+
+// SetName configures the name of the index.
+func (i *Index) SetName(name string) *Index {
+	i.Name = name
+	return i
+}
+
+// SetUnique configures the uniqueness of the index.
+func (i *Index) SetUnique(b bool) *Index {
+	i.Unique = b
+	return i
+}
+
+// SetTable configures the table of the index.
+func (i *Index) SetTable(t *Table) *Index {
+	i.Table = t
+	return i
+}
+
+// SetComment sets or appends the Comment attribute
+// to the index with the given value.
+func (i *Index) SetComment(v string) *Index {
+	ReplaceOrAppend(&i.Attrs, &Comment{Text: v})
+	return i
+}
+
+// AddAttrs adds additional attributes to the index.
+func (i *Index) AddAttrs(attrs ...Attr) *Index {
+	i.Attrs = append(i.Attrs, attrs...)
+	return i
+}
+
+// AddColumns adds the columns to index parts.
+func (i *Index) AddColumns(columns ...*Column) *Index {
+	for _, c := range columns {
+		if !c.hasIndex(i) {
+			c.Indexes = append(c.Indexes, i)
+		}
+		i.Parts = append(i.Parts, &IndexPart{SeqNo: len(i.Parts), C: c})
+	}
+	return i
+}
+
+func (c *Column) hasIndex(idx *Index) bool {
+	for i := range c.Indexes {
+		if c.Indexes[i] == idx {
+			return true
+		}
+	}
+	return false
+}
+
+// AddExprs adds the expressions to index parts.
+func (i *Index) AddExprs(exprs ...Expr) *Index {
+	for _, x := range exprs {
+		i.Parts = append(i.Parts, &IndexPart{SeqNo: len(i.Parts), X: x})
+	}
+	return i
+}
+
+// AddParts appends the given parts.
+func (i *Index) AddParts(parts ...*IndexPart) *Index {
+	for _, p := range parts {
+		if p.C != nil && !p.C.hasIndex(i) {
+			p.C.Indexes = append(p.C.Indexes, i)
+		}
+		p.SeqNo = len(i.Parts)
+		i.Parts = append(i.Parts, p)
+	}
+	return i
+}
+
+// NewIndexPart creates a new index part.
+func NewIndexPart() *IndexPart { return &IndexPart{} }
+
+// NewColumnPart creates a new index part with the given column.
+func NewColumnPart(c *Column) *IndexPart { return NewIndexPart().SetColumn(c) }
+
+// NewExprPart creates a new index part with the given expression.
+func NewExprPart(x Expr) *IndexPart { return NewIndexPart().SetExpr(x) }
+
+// SetDesc configures the "DESC" attribute of the key part.
+func (p *IndexPart) SetDesc(b bool) *IndexPart {
+	p.Desc = b
+	return p
+}
+
+// AddAttrs adds additional attributes to the index-part.
+func (p *IndexPart) AddAttrs(attrs ...Attr) *IndexPart {
+	p.Attrs = append(p.Attrs, attrs...)
+	return p
+}
+
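Taken together, the constructors and setters above compose into a compact way to describe a table programmatically. A brief usage sketch (editor's illustration using only the exported DSL defined in this file; the table and index names are hypothetical):

	// Describe a "users" table with a primary key and a unique index.
	users := schema.NewTable("users").
		AddColumns(
			schema.NewIntColumn("id", "bigint"),
			schema.NewStringColumn("name", "varchar", schema.StringSize(255)),
		)
	users.SetPrimaryKey(schema.NewPrimaryKey(users.Columns[0]))
	users.AddIndexes(schema.NewUniqueIndex("users_name_uniq").AddColumns(users.Columns[1]))
	public := schema.New("public").AddTables(users)
	_ = public
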
+// SetColumn sets the column of the index-part.
+func (p *IndexPart) SetColumn(c *Column) *IndexPart {
+	p.C = c
+	return p
+}
+
+// SetExpr sets the expression of the index-part.
+func (p *IndexPart) SetExpr(x Expr) *IndexPart {
+	p.X = x
+	return p
+}
+
+// NewForeignKey creates a new foreign-key with
+// the given constraints/symbol name.
+func NewForeignKey(symbol string) *ForeignKey {
+	return &ForeignKey{Symbol: symbol}
+}
+
+// SetTable configures the table that holds the foreign-key (child table).
+func (f *ForeignKey) SetTable(t *Table) *ForeignKey {
+	f.Table = t
+	return f
+}
+
+// AddColumns appends columns to the child-table columns.
+func (f *ForeignKey) AddColumns(columns ...*Column) *ForeignKey {
+	for _, c := range columns {
+		if !c.hasForeignKey(f) {
+			c.ForeignKeys = append(c.ForeignKeys, f)
+		}
+	}
+	f.Columns = append(f.Columns, columns...)
+	return f
+}
+
+func (c *Column) hasForeignKey(fk *ForeignKey) bool {
+	for i := range c.ForeignKeys {
+		if c.ForeignKeys[i] == fk {
+			return true
+		}
+	}
+	return false
+}
+
+// SetRefTable configures the referenced/parent table.
+func (f *ForeignKey) SetRefTable(t *Table) *ForeignKey {
+	f.RefTable = t
+	return f
+}
+
+// AddRefColumns appends columns to the parent-table columns.
+func (f *ForeignKey) AddRefColumns(columns ...*Column) *ForeignKey {
+	f.RefColumns = append(f.RefColumns, columns...)
+	return f
+}
+
+// SetOnUpdate sets the ON UPDATE constraint action.
+func (f *ForeignKey) SetOnUpdate(o ReferenceOption) *ForeignKey {
+	f.OnUpdate = o
+	return f
+}
+
+// SetOnDelete sets the ON DELETE constraint action.
+func (f *ForeignKey) SetOnDelete(o ReferenceOption) *ForeignKey {
+	f.OnDelete = o
+	return f
+}
+
+// ReplaceOrAppend searches an attribute of the same type as v in
+// the list and replaces it. Otherwise, v is appended to the list.
+func ReplaceOrAppend(attrs *[]Attr, v Attr) {
+	t := reflect.TypeOf(v)
+	for i := range *attrs {
+		if reflect.TypeOf((*attrs)[i]) == t {
+			(*attrs)[i] = v
+			return
+		}
+	}
+	*attrs = append(*attrs, v)
+}
+
+// RemoveAttr returns a new slice where all attributes of type T are filtered out.
+func RemoveAttr[T Attr](attrs []Attr) []Attr {
+	f := make([]Attr, 0, len(attrs))
+	for _, a := range attrs {
+		if _, ok := a.(T); !ok {
+			f = append(f, a)
+		}
+	}
+	return f
+}
+
+// del searches an attribute of the same type as v in
+// the list and deletes it.
+func del(attrs *[]Attr, v Attr) {
+	t := reflect.TypeOf(v)
+	for i := range *attrs {
+		if reflect.TypeOf((*attrs)[i]) == t {
+			*attrs = append((*attrs)[:i], (*attrs)[i+1:]...)
+			return
+		}
+	}
+}
diff --git a/vendor/ariga.io/atlas/sql/schema/inspect.go b/vendor/ariga.io/atlas/sql/schema/inspect.go
new file mode 100644
index 00000000..080794db
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/schema/inspect.go
@@ -0,0 +1,121 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package schema
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+)
+
+// A NotExistError wraps another error to retain its original text
+// but makes it possible for the migrator to catch it.
+type NotExistError struct {
+	Err error
+}
+
+func (e NotExistError) Error() string { return e.Err.Error() }
+
+// IsNotExistError reports if an error is a NotExistError.
+func IsNotExistError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotExistError
+	return errors.As(err, &e)
+}
+
+// ExecQuerier wraps the two standard sql.DB methods.
+type ExecQuerier interface {
+	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
+	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
+}
+
+// An InspectMode controls the amount and depth of information returned on inspection.
+type InspectMode uint
+
+const (
+	// InspectSchemas enables schema inspection.
+	InspectSchemas InspectMode = 1 << iota
+
+	// InspectTables enables schema tables inspection including
+	// all its child resources (e.g. columns or indexes).
+	InspectTables
+)
+
+// Is reports whether the given mode is enabled.
+func (m InspectMode) Is(i InspectMode) bool { return m&i != 0 }
+
+type (
+	// InspectOptions describes options for Inspector.
+	InspectOptions struct {
+		// Mode defines the amount of information returned by InspectSchema.
+		// If zero, InspectSchema inspects all resources in the schema.
+		Mode InspectMode
+
+		// Tables to inspect. Empty means all tables in the schema.
+		Tables []string
+
+		// Exclude defines a list of glob patterns used to filter resources from inspection.
+		// The syntax used by the different drivers is implemented as follows:
+		//
+		//	t   // exclude table 't'.
+		//	*   // exclude all tables.
+		//	t.c // exclude column, index and foreign-key named 'c' in table 't'.
+		//	t.* // the last item defines the filtering; all resources under 't' are excluded.
+		//	*.c // the last item defines the filtering; all resources named 'c' are excluded in all tables.
+		//	*.* // the last item defines the filtering; all resources under all tables are excluded.
+		//
+		Exclude []string
+	}
+
+	// InspectRealmOption describes options for RealmInspector.
+	InspectRealmOption struct {
+		// Mode defines the amount of information returned by InspectRealm.
+		// If zero, InspectRealm inspects all schemas and their child resources.
+		Mode InspectMode
+
+		// Schemas to inspect. Empty means all schemas in the realm.
+		Schemas []string
+
+		// Exclude defines a list of glob patterns used to filter resources from inspection.
+		// The syntax used by the different drivers is implemented as follows:
+		//
+		//	s     // exclude schema 's'.
+		//	*     // exclude all schemas.
+		//	s.t   // exclude table 't' under schema 's'.
+		//	s.*   // the last item defines the filtering; all tables under 's' are excluded.
+		//	*.t   // the last item defines the filtering; all tables named 't' are excluded in all schemas.
+		//	*.*   // the last item defines the filtering; all tables under all schemas are excluded.
+		//	*.*.c // the last item defines the filtering; all resources named 'c' are excluded in all tables.
+		//	*.*.* // the last item defines the filtering; all resources are excluded in all tables.
+		//
+		Exclude []string
+	}
+
+	// Inspector is the interface implemented by the different database
+	// drivers for inspecting schema or databases.
+	Inspector interface {
+		// InspectSchema returns the schema description by its name. An empty name means the
+		// "attached schema" (e.g. SCHEMA() in MySQL or CURRENT_SCHEMA() in PostgreSQL).
+		// A NotExistError error is returned if the schema does not exist in the database.
+		InspectSchema(ctx context.Context, name string, opts *InspectOptions) (*Schema, error)
+
+		// InspectRealm returns the description of the connected database.
+		InspectRealm(ctx context.Context, opts *InspectRealmOption) (*Realm, error)
+	}
+)
+
+// Normalizer is the interface implemented by the different database drivers for
+// "normalizing" schema objects. i.e. converting schema objects defined in natural
+// form to their representation in the database. Thus, two schema objects are equal
+// if their normal forms are equal.
+type Normalizer interface {
+	// NormalizeSchema returns the normal representation of a schema.
+	NormalizeSchema(context.Context, *Schema) (*Schema, error)
+
+	// NormalizeRealm returns the normal representation of a database.
+	NormalizeRealm(context.Context, *Realm) (*Realm, error)
+}
diff --git a/vendor/ariga.io/atlas/sql/schema/migrate.go b/vendor/ariga.io/atlas/sql/schema/migrate.go
new file mode 100644
index 00000000..45958a0c
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/schema/migrate.go
@@ -0,0 +1,457 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package schema
+
+import (
+	"context"
+	"errors"
+	"time"
+)
+
+type (
+	// A Change represents a schema change. The types below implement this
+	// interface and can be used for describing schema changes.
+	//
+	// The Change interface can also be implemented outside this package
+	// as follows:
+	//
+	//	type RenameType struct {
+	//		schema.Change
+	//		From, To string
+	//	}
+	//
+	//	var t schema.Change = &RenameType{From: "old", To: "new"}
+	//
+	Change interface {
+		change()
+	}
+
+	// Clause carries additional information that can be added
+	// to schema changes. The Clause interface can be implemented
+	// outside this package as follows:
+	//
+	//	type Authorization struct {
+	//		schema.Clause
+	//		UserName string
+	//	}
+	//
+	//	var c schema.Clause = &Authorization{UserName: "a8m"}
+	//
+	Clause interface {
+		clause()
+	}
+
+	// AddSchema describes a schema (named database) creation change.
+	// Unlike table creation, schemas and their elements are described
+	// with separate changes. For example, "AddSchema" and "AddTable".
+	AddSchema struct {
+		S     *Schema
+		Extra []Clause // Extra clauses and options.
+	}
+
+	// DropSchema describes a schema (named database) removal change.
+	DropSchema struct {
+		S     *Schema
+		Extra []Clause // Extra clauses and options.
+	}
+
+	// ModifySchema describes a modification change for schema attributes.
+	ModifySchema struct {
+		S       *Schema
+		Changes []Change
+	}
+
+	// AddTable describes a table creation change.
+	AddTable struct {
+		T     *Table
+		Extra []Clause // Extra clauses and options.
+	}
+
+	// DropTable describes a table removal change.
+	DropTable struct {
+		T     *Table
+		Extra []Clause // Extra clauses.
+	}
+
+	// ModifyTable describes a table modification change.
+	ModifyTable struct {
+		T       *Table
+		Changes []Change
+	}
+
+	// RenameTable describes a table rename change.
+	RenameTable struct {
+		From, To *Table
+	}
+
+	// AddColumn describes a column creation change.
+	AddColumn struct {
+		C *Column
+	}
+
+	// DropColumn describes a column removal change.
+	DropColumn struct {
+		C *Column
+	}
+
+	// ModifyColumn describes a change that modifies a column.
+	ModifyColumn struct {
+		From, To *Column
+		Change   ChangeKind
+	}
+
+	// RenameColumn describes a column rename change.
+	RenameColumn struct {
+		From, To *Column
+	}
+
+	// AddIndex describes an index creation change.
+	AddIndex struct {
+		I     *Index
+		Extra []Clause // Extra clauses and options.
+	}
+
+	// DropIndex describes an index removal change.
+	DropIndex struct {
+		I     *Index
+		Extra []Clause // Extra clauses and options.
+	}
+
+	// ModifyIndex describes an index modification.
+	ModifyIndex struct {
+		From, To *Index
+		Change   ChangeKind
+	}
+
+	// RenameIndex describes an index rename change.
+	RenameIndex struct {
+		From, To *Index
+	}
+
+	// AddPrimaryKey describes a primary-key creation change.
+	AddPrimaryKey struct {
+		P *Index
+	}
+
+	// DropPrimaryKey describes a primary-key removal change.
+	DropPrimaryKey struct {
+		P *Index
+	}
+
+	// ModifyPrimaryKey describes a primary-key modification.
+	ModifyPrimaryKey struct {
+		From, To *Index
+		Change   ChangeKind
+	}
+
+	// AddForeignKey describes a foreign-key creation change.
+	AddForeignKey struct {
+		F *ForeignKey
+	}
+
+	// DropForeignKey describes a foreign-key removal change.
+	DropForeignKey struct {
+		F *ForeignKey
+	}
+
+	// ModifyForeignKey describes a change that modifies a foreign-key.
+	ModifyForeignKey struct {
+		From, To *ForeignKey
+		Change   ChangeKind
+	}
+
+	// AddCheck describes a CHECK constraint creation change.
+	AddCheck struct {
+		C *Check
+	}
+
+	// DropCheck describes a CHECK constraint removal change.
+	DropCheck struct {
+		C *Check
+	}
+
+	// ModifyCheck describes a change that modifies a check.
+	ModifyCheck struct {
+		From, To *Check
+		Change   ChangeKind
+	}
+
+	// AddAttr describes an attribute addition.
+	AddAttr struct {
+		A Attr
+	}
+
+	// DropAttr describes an attribute removal.
+	DropAttr struct {
+		A Attr
+	}
+
+	// ModifyAttr describes a change that modifies an element attribute.
+	ModifyAttr struct {
+		From, To Attr
+	}
+
+	// IfExists represents a clause in a schema change that is commonly
+	// supported by multiple statements (e.g. DROP TABLE or DROP SCHEMA).
+	IfExists struct{}
+
+	// IfNotExists represents a clause in a schema change that is commonly
+	// supported by multiple statements (e.g. CREATE TABLE or CREATE SCHEMA).
+	IfNotExists struct{}
+)
+
+// A ChangeKind describes a change kind that can be combined
+// using a set of flags. The zero kind is no change.
+//
+//go:generate stringer -type ChangeKind
+type ChangeKind uint
+
+const (
+	// NoChange holds the zero value of a change kind.
+	NoChange ChangeKind = 0
+
+	// Common changes.
+
+	// ChangeAttr describes a change to an element's attributes.
+	// For example, a table CHECK was added or changed.
+	ChangeAttr ChangeKind = 1 << (iota - 1)
+	// ChangeCharset describes a character-set change.
+	ChangeCharset
+	// ChangeCollate describes a collation/encoding change.
+	ChangeCollate
+	// ChangeComment describes a comment change (of any element).
+	ChangeComment
+
+	// Column specific changes.
+
+	// ChangeNull describes a change to the NULL constraint.
+	ChangeNull
+	// ChangeType describes a column type change.
+	ChangeType
+	// ChangeDefault describes a column default change.
+	ChangeDefault
+	// ChangeGenerated describes a change to the generated expression.
+	ChangeGenerated
+
+	// Index specific changes.
+
+	// ChangeUnique describes a change to the uniqueness constraint.
+	// For example, an index was changed from non-unique to unique.
+	ChangeUnique
+	// ChangeParts describes a change to one or more of the index parts.
+	// For example, index keeps its previous name, but the columns order
+	// was changed.
+	ChangeParts
+
+	// Foreign key specific changes.
+
+	// ChangeColumn describes a change to the foreign-key (child) columns.
+	ChangeColumn
+	// ChangeRefColumn describes a change to the foreign-key (parent) columns.
+	ChangeRefColumn
+	// ChangeRefTable describes a change to the foreign-key (parent) table.
+	ChangeRefTable
+	// ChangeUpdateAction describes a change to the foreign-key update action.
+	ChangeUpdateAction
+	// ChangeDeleteAction describes a change to the foreign-key delete action.
+	ChangeDeleteAction
+)
+
+// Is reports whether c matches the given change kind.
+func (k ChangeKind) Is(c ChangeKind) bool {
+	return k == c || k&c != 0
+}
+
+// Differ is the interface implemented by the different
+// drivers for comparing and diffing schema top elements.
+type Differ interface {
+	// RealmDiff returns a diff report for migrating a realm
+	// (or a database) from state "from" to state "to". An error
+	// is returned if such step is not possible.
+	RealmDiff(from, to *Realm) ([]Change, error)
+
+	// SchemaDiff returns a diff report for migrating a schema
+	// from state "from" to state "to". An error is returned
+	// if such step is not possible.
+	SchemaDiff(from, to *Schema) ([]Change, error)
+
+	// TableDiff returns a diff report for migrating a table
+	// from state "from" to state "to". An error is returned
+	// if such step is not possible.
+	TableDiff(from, to *Table) ([]Change, error)
+}
+
+// ErrLocked is returned on Lock calls which have failed to obtain the lock.
+var ErrLocked = errors.New("sql/schema: lock is held by other session")
+
+type (
+	// UnlockFunc is returned by the Locker to explicitly
+	// release the named "advisory lock".
+	UnlockFunc func() error
+
+	// Locker is an interface that is optionally implemented by the different drivers
+	// for obtaining an "advisory lock" with the given name.
+	Locker interface {
+		// Lock acquires a named "advisory lock", using the given timeout. Negative value means no timeout,
+		// and the zero value means a "try lock" mode. i.e. return immediately if the lock is already taken.
+		// The returned unlock function is used to release the advisory lock acquired by the session.
+		//
+		// An ErrLocked is returned if the operation failed to obtain the lock in all different timeout modes.
+		Lock(ctx context.Context, name string, timeout time.Duration) (UnlockFunc, error)
+	}
+)
+
+// Changes is a list of changes that allows for searching and mutating changes.
+type Changes []Change
+
+// IndexAddTable returns the index of the first AddTable in the changes
+// with the given name, or -1 if there is no such change in the Changes.
+func (c Changes) IndexAddTable(name string) int {
+	return c.search(func(c Change) bool {
+		a, ok := c.(*AddTable)
+		return ok && a.T.Name == name
+	})
+}
+
+// IndexDropTable returns the index of the first DropTable in the changes
+// with the given name, or -1 if there is no such change in the Changes.
+func (c Changes) IndexDropTable(name string) int {
+	return c.search(func(c Change) bool {
+		a, ok := c.(*DropTable)
+		return ok && a.T.Name == name
+	})
+}
+
+// LastIndexAddTable returns the index of the last AddTable in the changes
+// with the given name, or -1 if there is no such change in the Changes.
+func (c Changes) LastIndexAddTable(name string) int {
+	return c.rsearch(func(c Change) bool {
+		a, ok := c.(*AddTable)
+		return ok && a.T.Name == name
+	})
+}
+
+// LastIndexDropTable returns the index of the last DropTable in the changes
+// with the given name, or -1 if there is no such change in the Changes.
+func (c Changes) LastIndexDropTable(name string) int {
+	return c.rsearch(func(c Change) bool {
+		a, ok := c.(*DropTable)
+		return ok && a.T.Name == name
+	})
+}
+
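The Index*/LastIndex* helpers combine naturally with RemoveIndex (defined below) to prune a change set before planning. A short sketch (editor's illustration over a hypothetical change list):

	// Drop the AddTable change for "users", if present.
	changes := schema.Changes{
		&schema.AddTable{T: schema.NewTable("users")},
		&schema.DropTable{T: schema.NewTable("logs")},
	}
	if i := changes.IndexAddTable("users"); i != -1 {
		changes.RemoveIndex(i)
	}

+// IndexAddColumn returns the index of the first AddColumn in the changes
+// with the given name, or -1 if there is no such change in the Changes.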
+func (c Changes) IndexAddColumn(name string) int { + return c.search(func(c Change) bool { + a, ok := c.(*AddColumn) + return ok && a.C.Name == name + }) +} + +// IndexDropColumn returns the index of the first DropColumn in the changes +// with the given name, or -1 if there is no such change in the Changes. +func (c Changes) IndexDropColumn(name string) int { + return c.search(func(c Change) bool { + d, ok := c.(*DropColumn) + return ok && d.C.Name == name + }) +} + +// IndexModifyColumn returns the index of the first ModifyColumn in the changes +// with the given name, or -1 if there is no such change in the Changes. +func (c Changes) IndexModifyColumn(name string) int { + return c.search(func(c Change) bool { + a, ok := c.(*ModifyColumn) + return ok && a.From.Name == name + }) +} + +// IndexAddIndex returns the index of the first AddIndex in the changes +// with the given name, or -1 if there is no such change in the Changes. +func (c Changes) IndexAddIndex(name string) int { + return c.search(func(c Change) bool { + a, ok := c.(*AddIndex) + return ok && a.I.Name == name + }) +} + +// IndexDropIndex returns the index of the first DropIndex in the changes +// with the given name, or -1 if there is no such change in the Changes. +func (c Changes) IndexDropIndex(name string) int { + return c.search(func(c Change) bool { + a, ok := c.(*DropIndex) + return ok && a.I.Name == name + }) +} + +// RemoveIndex removes elements in the given indexes from the Changes. +func (c *Changes) RemoveIndex(indexes ...int) { + changes := make([]Change, 0, len(*c)-len(indexes)) +Loop: + for i := range *c { + for _, idx := range indexes { + if i == idx { + continue Loop + } + } + changes = append(changes, (*c)[i]) + } + *c = changes +} + +// search returns the index of the first call to f that returns true, or -1. +func (c Changes) search(f func(Change) bool) int { + for i := range c { + if f(c[i]) { + return i + } + } + return -1 +} + +// rsearch is the reversed version of search. It returns the +// index of the last call to f that returns true, or -1. +func (c Changes) rsearch(f func(Change) bool) int { + for i := len(c) - 1; i >= 0; i-- { + if f(c[i]) { + return i + } + } + return -1 +} + +// changes. +func (*AddAttr) change() {} +func (*DropAttr) change() {} +func (*ModifyAttr) change() {} +func (*AddSchema) change() {} +func (*DropSchema) change() {} +func (*ModifySchema) change() {} +func (*AddTable) change() {} +func (*DropTable) change() {} +func (*ModifyTable) change() {} +func (*RenameTable) change() {} +func (*AddIndex) change() {} +func (*DropIndex) change() {} +func (*ModifyIndex) change() {} +func (*RenameIndex) change() {} +func (*AddPrimaryKey) change() {} +func (*DropPrimaryKey) change() {} +func (*ModifyPrimaryKey) change() {} +func (*AddCheck) change() {} +func (*DropCheck) change() {} +func (*ModifyCheck) change() {} +func (*AddColumn) change() {} +func (*DropColumn) change() {} +func (*ModifyColumn) change() {} +func (*RenameColumn) change() {} +func (*AddForeignKey) change() {} +func (*DropForeignKey) change() {} +func (*ModifyForeignKey) change() {} + +// clauses. +func (*IfExists) clause() {} +func (*IfNotExists) clause() {} diff --git a/vendor/ariga.io/atlas/sql/schema/schema.go b/vendor/ariga.io/atlas/sql/schema/schema.go new file mode 100644 index 00000000..42014b8e --- /dev/null +++ b/vendor/ariga.io/atlas/sql/schema/schema.go @@ -0,0 +1,339 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +type ( + // A Realm or a database describes a domain of schema resources that are logically connected + // and can be accessed and queried in the same connection (e.g. a physical database instance). + Realm struct { + Schemas []*Schema + Attrs []Attr + } + + // A Schema describes a database schema (i.e. named database). + Schema struct { + Name string + Realm *Realm + Tables []*Table + Attrs []Attr // Attrs and options. + } + + // A Table represents a table definition. + Table struct { + Name string + Schema *Schema + Columns []*Column + Indexes []*Index + PrimaryKey *Index + ForeignKeys []*ForeignKey + Attrs []Attr // Attrs, constraints and options. + } + + // A Column represents a column definition. + Column struct { + Name string + Type *ColumnType + Default Expr + Attrs []Attr + Indexes []*Index + // Foreign keys that this column is + // part of their child columns. + ForeignKeys []*ForeignKey + } + + // ColumnType represents a column type that is implemented by the dialect. + ColumnType struct { + Type Type + Raw string + Null bool + } + + // An Index represents an index definition. + Index struct { + Name string + Unique bool + Table *Table + Attrs []Attr + Parts []*IndexPart + } + + // An IndexPart represents an index part that + // can be either an expression or a column. + IndexPart struct { + // SeqNo represents the sequence number of the key part + // in the index. + SeqNo int + // Desc indicates if the key part is stored in descending + // order. All databases use ascending order as default. + Desc bool + X Expr + C *Column + Attrs []Attr + } + + // A ForeignKey represents an index definition. + ForeignKey struct { + Symbol string + Table *Table + Columns []*Column + RefTable *Table + RefColumns []*Column + OnUpdate ReferenceOption + OnDelete ReferenceOption + } +) + +// Schema returns the first schema that matched the given name. +func (r *Realm) Schema(name string) (*Schema, bool) { + for _, s := range r.Schemas { + if s.Name == name { + return s, true + } + } + return nil, false +} + +// Table returns the first table that matched the given name. +func (s *Schema) Table(name string) (*Table, bool) { + for _, t := range s.Tables { + if t.Name == name { + return t, true + } + } + return nil, false +} + +// Column returns the first column that matched the given name. +func (t *Table) Column(name string) (*Column, bool) { + for _, c := range t.Columns { + if c.Name == name { + return c, true + } + } + return nil, false +} + +// Index returns the first index that matched the given name. +func (t *Table) Index(name string) (*Index, bool) { + for _, i := range t.Indexes { + if i.Name == name { + return i, true + } + } + return nil, false +} + +// ForeignKey returns the first foreign-key that matched the given symbol (constraint name). +func (t *Table) ForeignKey(symbol string) (*ForeignKey, bool) { + for _, f := range t.ForeignKeys { + if f.Symbol == symbol { + return f, true + } + } + return nil, false +} + +// Column returns the first column that matches the given name. +func (f *ForeignKey) Column(name string) (*Column, bool) { + for _, c := range f.Columns { + if c.Name == name { + return c, true + } + } + return nil, false +} + +// RefColumn returns the first referenced column that matches the given name. 
+func (f *ForeignKey) RefColumn(name string) (*Column, bool) {
+ for _, c := range f.RefColumns {
+ if c.Name == name {
+ return c, true
+ }
+ }
+ return nil, false
+}
+
+// ReferenceOption for constraint actions.
+type ReferenceOption string
+
+// Reference options (actions) specified by ON UPDATE and ON DELETE
+// subclauses of the FOREIGN KEY clause.
+const (
+ NoAction ReferenceOption = "NO ACTION"
+ Restrict ReferenceOption = "RESTRICT"
+ Cascade ReferenceOption = "CASCADE"
+ SetNull ReferenceOption = "SET NULL"
+ SetDefault ReferenceOption = "SET DEFAULT"
+)
+
+type (
+ // A Type represents a database type. The types below implement this
+ // interface and can be used for describing schemas.
+ //
+ // The Type interface can also be implemented outside this package as follows:
+ //
+ // type SpatialType struct {
+ // schema.Type
+ // T string
+ // }
+ //
+ // var t schema.Type = &SpatialType{T: "point"}
+ //
+ Type interface {
+ typ()
+ }
+
+ // EnumType represents an enum type.
+ EnumType struct {
+ T string // Optional type.
+ Values []string // Enum values.
+ Schema *Schema // Optional schema.
+ }
+
+ // BinaryType represents a type that stores binary data.
+ BinaryType struct {
+ T string
+ Size *int
+ }
+
+ // StringType represents a string type.
+ StringType struct {
+ T string
+ Size int
+ }
+
+ // BoolType represents a boolean type.
+ BoolType struct {
+ T string
+ }
+
+ // IntegerType represents an integer type.
+ IntegerType struct {
+ T string
+ Unsigned bool
+ Attrs []Attr
+ }
+
+ // DecimalType represents a fixed-point type that stores exact numeric values.
+ DecimalType struct {
+ T string
+ Precision int
+ Scale int
+ Unsigned bool
+ }
+
+ // FloatType represents a floating-point type that stores approximate numeric values.
+ FloatType struct {
+ T string
+ Unsigned bool
+ Precision int
+ }
+
+ // TimeType represents a date/time type.
+ TimeType struct {
+ T string
+ Precision *int
+ }
+
+ // JSONType represents a JSON type.
+ JSONType struct {
+ T string
+ }
+
+ // SpatialType represents a spatial/geometric type.
+ SpatialType struct {
+ T string
+ }
+
+ // A UUIDType defines a UUID type.
+ UUIDType struct {
+ T string
+ }
+
+ // UnsupportedType represents a type that is not supported by the drivers.
+ UnsupportedType struct {
+ T string
+ }
+)
+
+type (
+ // Expr defines an SQL expression in schema DDL.
+ Expr interface {
+ expr()
+ }
+
+ // Literal represents a basic literal expression like 1 or '1'.
+ // String literals are usually quoted with single or double quotes.
+ Literal struct {
+ V string
+ }
+
+ // RawExpr represents a raw expression like "uuid()" or "current_timestamp()".
+ // Unlike literals, raw expressions are usually inlined as-is during migration.
+ RawExpr struct {
+ X string
+ }
+)
+
+type (
+ // Attr represents the interface that all attributes implement.
+ Attr interface {
+ attr()
+ }
+
+ // Comment describes a schema element comment.
+ Comment struct {
+ Text string
+ }
+
+ // Charset describes a column or a table character-set setting.
+ Charset struct {
+ V string
+ }
+
+ // Collation describes a column or a table collation setting.
+ Collation struct {
+ V string
+ }
+
+ // Check describes a CHECK constraint.
+ Check struct {
+ Name string // Optional constraint name.
+ Expr string // Actual CHECK.
+ Attrs []Attr // Additional attributes (e.g. ENFORCED).
+ }
+
+ // GeneratedExpr describes the expression used for generating
+ // the value of a generated/virtual column.
+ GeneratedExpr struct {
+ Expr string
+ Type string // Optional type. e.g. 
STORED or VIRTUAL. + } +) + +// expressions. +func (*Literal) expr() {} +func (*RawExpr) expr() {} + +// types. +func (*BoolType) typ() {} +func (*EnumType) typ() {} +func (*TimeType) typ() {} +func (*JSONType) typ() {} +func (*FloatType) typ() {} +func (*StringType) typ() {} +func (*BinaryType) typ() {} +func (*SpatialType) typ() {} +func (*UUIDType) typ() {} +func (*IntegerType) typ() {} +func (*DecimalType) typ() {} +func (*UnsupportedType) typ() {} + +// attributes. +func (*Check) attr() {} +func (*Comment) attr() {} +func (*Charset) attr() {} +func (*Collation) attr() {} +func (*GeneratedExpr) attr() {} diff --git a/vendor/ariga.io/atlas/sql/sqlclient/BUILD b/vendor/ariga.io/atlas/sql/sqlclient/BUILD new file mode 100644 index 00000000..8b2defcf --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlclient/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlclient", + srcs = ["client.go"], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/sqlclient", + importpath = "ariga.io/atlas/sql/sqlclient", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/schema", + ], +) diff --git a/vendor/ariga.io/atlas/sql/sqlclient/client.go b/vendor/ariga.io/atlas/sql/sqlclient/client.go new file mode 100644 index 00000000..8f677ceb --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlclient/client.go @@ -0,0 +1,404 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlclient + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io" + "net/url" + "sync" + + "ariga.io/atlas/schemahcl" + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" +) + +type ( + // Client provides the common functionalities for working with Atlas from different + // applications (e.g. CLI and TF). Note, the Client is dialect specific and should + // be instantiated using a call to Open. + Client struct { + // Name used when creating the client. + Name string + + // DB used for creating the client. + DB *sql.DB + // URL holds an enriched url.URL. + URL *URL + + // A migration driver for the attached dialect. + migrate.Driver + // Additional closers that can be closed at the + // end of the client lifetime. + closers []io.Closer + + // Marshal and Evaluator functions for decoding + // and encoding the schema documents. + schemahcl.Marshaler + schemahcl.Evaluator + + // Functions registered by the drivers and used for opening transactions and their clients. + openDriver func(schema.ExecQuerier) (migrate.Driver, error) + openTx TxOpener + } + + // TxClient is returned by calling Client.Tx. It behaves the same as Client, + // but wraps all operations within a transaction. + TxClient struct { + *Client + + // The transaction this Client wraps. + Tx *Tx + } + + // URL extends the standard url.URL with additional + // connection information attached by the Opener (if any). + URL struct { + *url.URL + + // The DSN used for opening the connection. + DSN string `json:"-"` + + // The Schema this client is connected to. + Schema string + } +) + +// Tx returns a transactional client. 
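+// A Client is typically obtained from Open (defined below) rather than
+// constructed by hand. An illustrative sketch, assuming a driver package has
+// registered itself on import; the URL is hypothetical:
+//
+// client, err := sqlclient.Open(ctx, "sqlite://file:app.db")
+// if err != nil {
+// // handle error
+// }
+// defer client.Close()
+// s, err := client.InspectSchema(ctx, "", nil) // promoted from the embedded migrate.Driver
+//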
+func (c *Client) Tx(ctx context.Context, opts *sql.TxOptions) (*TxClient, error) { + if c.openDriver == nil { + return nil, errors.New("sql/sqlclient: unexpected driver opener: ") + } + var tx *Tx + switch { + case c.openTx != nil: + ttx, err := c.openTx(ctx, c.DB, opts) + if err != nil { + return nil, err + } + tx = ttx + default: + ttx, err := c.DB.BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("sql/sqlclient: starting transaction: %w", err) + } + tx = &Tx{Tx: ttx} + } + drv, err := c.openDriver(tx) + if err != nil { + return nil, fmt.Errorf("sql/sqlclient: opening atlas driver: %w", err) + } + ic := *c + ic.Driver = drv + return &TxClient{Client: &ic, Tx: tx}, nil +} + +// Commit the transaction. +func (c *TxClient) Commit() error { + return c.Tx.Commit() +} + +// Rollback the transaction. +func (c *TxClient) Rollback() error { + return c.Tx.Rollback() +} + +// AddClosers adds list of closers to close at the end of the client lifetime. +func (c *Client) AddClosers(closers ...io.Closer) { + c.closers = append(c.closers, closers...) +} + +// Close closes the underlying database connection and the migration +// driver in case it implements the io.Closer interface. +func (c *Client) Close() (err error) { + for _, closer := range append(c.closers, c.DB) { + if cerr := closer.Close(); cerr != nil { + if err != nil { + cerr = fmt.Errorf("%v: %v", err, cerr) + } + err = cerr + } + } + return err +} + +type ( + // Opener opens a migration driver by the given URL. + Opener interface { + Open(ctx context.Context, u *url.URL) (*Client, error) + } + + // OpenerFunc allows using a function as an Opener. + OpenerFunc func(context.Context, *url.URL) (*Client, error) + + // URLParser parses an url.URL into an enriched URL and attaches additional info to it. + URLParser interface { + ParseURL(*url.URL) *URL + } + + // URLParserFunc allows using a function as an URLParser. + URLParserFunc func(*url.URL) *URL + + // SchemaChanger is implemented by a driver if it how to change the connection URL to represent another schema. + SchemaChanger interface { + ChangeSchema(*url.URL, string) *url.URL + } + + driver struct { + Opener + name string + parser URLParser + txOpener TxOpener + } +) + +// Open calls f(ctx, u). +func (f OpenerFunc) Open(ctx context.Context, u *url.URL) (*Client, error) { + return f(ctx, u) +} + +// ParseURL calls f(u). +func (f URLParserFunc) ParseURL(u *url.URL) *URL { + return f(u) +} + +var drivers sync.Map + +type ( + // openOptions holds additional configuration values for opening a Client. + openOptions struct { + schema *string + } + + // OpenOption allows to configure a openOptions using functional arguments. + OpenOption func(*openOptions) error +) + +// ErrUnsupported is returned if a registered driver does not support changing the schema. +var ErrUnsupported = errors.New("sql/sqlclient: driver does not support changing connected schema") + +// Open opens an Atlas client by its provided url string. +func Open(ctx context.Context, s string, opts ...OpenOption) (*Client, error) { + u, err := url.Parse(s) + if err != nil { + return nil, fmt.Errorf("sql/sqlclient: parse open url: %w", err) + } + return OpenURL(ctx, u, opts...) +} + +// OpenURL opens an Atlas client by its provided url.URL. +func OpenURL(ctx context.Context, u *url.URL, opts ...OpenOption) (*Client, error) { + cfg := &openOptions{} + for _, opt := range opts { + if err := opt(cfg); err != nil { + return nil, err + } + } + if u.Scheme == "" { + return nil, errors.New("sql/sqlclient: missing driver. 
See: https://atlasgo.io/url") + } + v, ok := drivers.Load(u.Scheme) + if !ok { + return nil, fmt.Errorf("sql/sqlclient: unknown driver %q. See: https://atlasgo.io/url", u.Scheme) + } + drv := v.(*driver) + // If there is a schema given and the driver allows to change the schema for the url, do it. + if cfg.schema != nil { + sc, ok := drv.parser.(SchemaChanger) + if !ok { + return nil, ErrUnsupported + } + u = sc.ChangeSchema(u, *cfg.schema) + } + client, err := drv.Open(ctx, u) + if err != nil { + return nil, err + } + if client.URL == nil { + client.URL = drv.parser.ParseURL(u) + } + if client.openTx == nil && drv.txOpener != nil { + client.openTx = drv.txOpener + } + return client, nil +} + +// OpenSchema opens the connection to the given schema. +// If the registered driver does not support this, ErrUnsupported is returned instead. +func OpenSchema(s string) OpenOption { + return func(c *openOptions) error { + c.schema = &s + return nil + } +} + +type ( + registerOptions struct { + openDriver func(schema.ExecQuerier) (migrate.Driver, error) + txOpener TxOpener + parser URLParser + flavours []string + codec interface { + schemahcl.Marshaler + schemahcl.Evaluator + } + } + // RegisterOption allows configuring the Opener + // registration using functional options. + RegisterOption func(*registerOptions) +) + +// RegisterFlavours allows registering additional flavours +// (i.e. names), accepted by Atlas to open clients. +func RegisterFlavours(flavours ...string) RegisterOption { + return func(opts *registerOptions) { + opts.flavours = flavours + } +} + +// RegisterURLParser allows registering a function for parsing +// the url.URL and attach additional info to the extended URL. +func RegisterURLParser(p URLParser) RegisterOption { + return func(opts *registerOptions) { + opts.parser = p + } +} + +// RegisterCodec registers static codec for attaching into +// the client after it is opened. +func RegisterCodec(m schemahcl.Marshaler, e schemahcl.Evaluator) RegisterOption { + return func(opts *registerOptions) { + opts.codec = struct { + schemahcl.Marshaler + schemahcl.Evaluator + }{ + Marshaler: m, + Evaluator: e, + } + } +} + +// RegisterDriverOpener registers a func to create a migrate.Driver from a schema.ExecQuerier. +// Registering this function is implicitly done when using DriverOpener. +// The passed opener is used when creating a TxClient. +func RegisterDriverOpener(open func(schema.ExecQuerier) (migrate.Driver, error)) RegisterOption { + return func(opts *registerOptions) { + opts.openDriver = open + } +} + +// DriverOpener is a helper Opener creator for sharing between all drivers. +func DriverOpener(open func(schema.ExecQuerier) (migrate.Driver, error)) Opener { + return OpenerFunc(func(_ context.Context, u *url.URL) (*Client, error) { + v, ok := drivers.Load(u.Scheme) + if !ok { + return nil, fmt.Errorf("sql/sqlclient: unexpected missing opener %q", u.Scheme) + } + drv := v.(*driver) + ur := drv.parser.ParseURL(u) + db, err := sql.Open(drv.name, ur.DSN) + if err != nil { + return nil, err + } + mdr, err := open(db) + if err != nil { + if cerr := db.Close(); cerr != nil { + err = fmt.Errorf("%w: %v", err, cerr) + } + return nil, err + } + return &Client{ + Name: drv.name, + DB: db, + URL: ur, + Driver: mdr, + openDriver: open, + openTx: drv.txOpener, + }, nil + }) +} + +type ( + // Tx wraps sql.Tx with optional custom Commit and Rollback functions. 
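+// Drivers wire themselves into this registry from their init functions.
+// A minimal sketch of such a registration (the "mydriver"/"mydb" names are
+// illustrative; see the sqlite driver for a real call site):
+//
+// func init() {
+// sqlclient.Register(
+// "mydriver",
+// sqlclient.DriverOpener(Open), // Open: func(schema.ExecQuerier) (migrate.Driver, error)
+// sqlclient.RegisterFlavours("mydb"),
+// )
+// }
+//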
+ Tx struct { + *sql.Tx + CommitFn func() error // override default commit behavior + RollbackFn func() error // override default rollback behavior + } + // TxOpener opens a transaction with optional closer. + TxOpener func(context.Context, *sql.DB, *sql.TxOptions) (*Tx, error) +) + +// Commit the transaction. +func (tx *Tx) Commit() error { + fn := tx.CommitFn + if fn == nil { + fn = tx.Tx.Commit + } + return fn() +} + +// Rollback the transaction. +func (tx *Tx) Rollback() error { + fn := tx.RollbackFn + if fn == nil { + fn = tx.Tx.Rollback + } + return fn() +} + +// RegisterTxOpener allows registering a custom transaction opener with an optional close function. +func RegisterTxOpener(open TxOpener) RegisterOption { + return func(opts *registerOptions) { + opts.txOpener = open + } +} + +// Register registers a client Opener (i.e. creator) with the given name. +func Register(name string, opener Opener, opts ...RegisterOption) { + if opener == nil { + panic("sql/sqlclient: Register opener is nil") + } + opt := ®isterOptions{ + // Default URL parser uses the URL as the DSN. + parser: URLParserFunc(func(u *url.URL) *URL { return &URL{URL: u, DSN: u.String()} }), + } + for i := range opts { + opts[i](opt) + } + if opt.codec != nil { + f := opener + opener = OpenerFunc(func(ctx context.Context, u *url.URL) (*Client, error) { + c, err := f.Open(ctx, u) + if err != nil { + return nil, err + } + c.Marshaler, c.Evaluator = opt.codec, opt.codec + return c, nil + }) + } + // If there was a driver opener registered by a call to RegisterDriverOpener, it has precedence. + if opt.openDriver != nil { + f := opener + opener = OpenerFunc(func(ctx context.Context, u *url.URL) (*Client, error) { + c, err := f.Open(ctx, u) + if err != nil { + return nil, err + } + c.openDriver = opt.openDriver + return c, err + }) + } + drv := &driver{Opener: opener, name: name, parser: opt.parser, txOpener: opt.txOpener} + for _, f := range append(opt.flavours, name) { + if _, ok := drivers.Load(f); ok { + panic("sql/sqlclient: Register called twice for " + f) + } + drivers.Store(f, drv) + } +} diff --git a/vendor/ariga.io/atlas/sql/sqlite/BUILD b/vendor/ariga.io/atlas/sql/sqlite/BUILD new file mode 100644 index 00000000..59151fd3 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlite/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlite", + srcs = [ + "convert.go", + "diff.go", + "driver.go", + "inspect.go", + "migrate.go", + "sqlspec.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/sqlite", + importpath = "ariga.io/atlas/sql/sqlite", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/ariga.io/atlas/sql/internal/specutil", + "//vendor/ariga.io/atlas/sql/internal/sqlx", + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/ariga.io/atlas/sql/sqlclient", + "//vendor/ariga.io/atlas/sql/sqlspec", + "//vendor/github.com/hashicorp/hcl/v2/hclparse", + "//vendor/github.com/zclconf/go-cty/cty", + ], +) diff --git a/vendor/ariga.io/atlas/sql/sqlite/convert.go b/vendor/ariga.io/atlas/sql/sqlite/convert.go new file mode 100644 index 00000000..07394e6e --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlite/convert.go @@ -0,0 +1,107 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. 
+ +package sqlite + +import ( + "fmt" + "strconv" + "strings" + + "ariga.io/atlas/sql/schema" +) + +// FormatType converts types to one format. A lowered format. +// This is due to SQLite flexibility to allow any data types +// and use a set of rules to define the type affinity. +// See: https://www.sqlite.org/datatype3.html +func FormatType(t schema.Type) (string, error) { + var f string + switch t := t.(type) { + case *schema.BoolType: + f = strings.ToLower(t.T) + case *schema.BinaryType: + f = strings.ToLower(t.T) + case *schema.EnumType: + f = t.T + case *schema.IntegerType: + f = strings.ToLower(t.T) + case *schema.StringType: + f = strings.ToLower(t.T) + case *schema.TimeType: + f = strings.ToLower(t.T) + case *schema.FloatType: + f = strings.ToLower(t.T) + case *schema.DecimalType: + f = strings.ToLower(t.T) + case *schema.JSONType: + f = strings.ToLower(t.T) + case *schema.SpatialType: + f = strings.ToLower(t.T) + case *UUIDType: + f = strings.ToLower(t.T) + case *schema.UnsupportedType: + return "", fmt.Errorf("sqlite: unsupported type: %q", t.T) + default: + return "", fmt.Errorf("sqlite: invalid schema type: %T", t) + } + return f, nil +} + +// ParseType returns the schema.Type value represented by the given raw type. +// It is expected to be one of the types in https://www.sqlite.org/datatypes.html, +// or some of the common types used by ORMs like Ent. +func ParseType(c string) (schema.Type, error) { + // A datatype may be zero or more names. + if c == "" { + return &schema.UnsupportedType{}, nil + } + parts := columnParts(c) + switch t := parts[0]; t { + case "bool", "boolean": + return &schema.BoolType{T: t}, nil + case "blob": + return &schema.BinaryType{T: t}, nil + case "int2", "int8", "int", "uint64", "integer", "tinyint", "smallint", "mediumint", "bigint", "unsigned big int": + // All integer types have the same "type affinity". + return &schema.IntegerType{T: t}, nil + case "real", "double", "double precision", "float": + return &schema.FloatType{T: t}, nil + case "numeric", "decimal": + ct := &schema.DecimalType{T: t} + if len(parts) > 1 { + p, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("parse precision %q", parts[1]) + } + ct.Precision = int(p) + } + if len(parts) > 2 { + s, err := strconv.ParseInt(parts[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("parse scale %q", parts[1]) + } + ct.Scale = int(s) + } + return ct, nil + case "char", "character", "varchar", "varying character", "nchar", "native character", "nvarchar", "text", "clob": + ct := &schema.StringType{T: t} + if len(parts) > 1 { + p, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("parse size %q", parts[1]) + } + ct.Size = int(p) + } + return ct, nil + case "json": + return &schema.JSONType{T: t}, nil + case "date", "datetime", "time", "timestamp": + return &schema.TimeType{T: t}, nil + case "uuid": + return &UUIDType{T: t}, nil + default: + return &schema.UnsupportedType{T: t}, nil + } +} diff --git a/vendor/ariga.io/atlas/sql/sqlite/diff.go b/vendor/ariga.io/atlas/sql/sqlite/diff.go new file mode 100644 index 00000000..5d55bb37 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlite/diff.go @@ -0,0 +1,174 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. 
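+//
+// Note on the conversion helpers in convert.go above: FormatType and ParseType
+// translate between schema.Type values and raw SQLite type strings. A short,
+// illustrative sketch (the round-trip is lossy for sizes, as FormatType emits
+// only the lowered type name):
+//
+// t, err := sqlite.ParseType("varchar(255)") // &schema.StringType{T: "varchar", Size: 255}
+// s, err := sqlite.FormatType(t) // "varchar"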
+
+package sqlite
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "ariga.io/atlas/sql/internal/sqlx"
+ "ariga.io/atlas/sql/schema"
+)
+
+// DefaultDiff provides basic diffing capabilities for SQLite dialects.
+// Note, it is recommended to call Open, create a new Driver and use its
+// Differ when a database connection is available.
+var DefaultDiff schema.Differ = &sqlx.Diff{DiffDriver: &diff{}}
+
+// A diff provides a SQLite implementation for sqlx.DiffDriver.
+type diff struct{}
+
+// SchemaAttrDiff returns a changeset for migrating schema attributes from one state to the other.
+func (d *diff) SchemaAttrDiff(_, _ *schema.Schema) []schema.Change {
+ // No special schema attribute diffing for SQLite.
+ return nil
+}
+
+// TableAttrDiff returns a changeset for migrating table attributes from one state to the other.
+func (d *diff) TableAttrDiff(from, to *schema.Table) ([]schema.Change, error) {
+ var changes []schema.Change
+ switch {
+ case sqlx.Has(from.Attrs, &WithoutRowID{}) && !sqlx.Has(to.Attrs, &WithoutRowID{}):
+ changes = append(changes, &schema.DropAttr{
+ A: &WithoutRowID{},
+ })
+ case !sqlx.Has(from.Attrs, &WithoutRowID{}) && sqlx.Has(to.Attrs, &WithoutRowID{}):
+ changes = append(changes, &schema.AddAttr{
+ A: &WithoutRowID{},
+ })
+ }
+ return append(changes, sqlx.CheckDiff(from, to)...), nil
+}
+
+// ColumnChange returns the schema changes (if any) for migrating one column to the other.
+func (d *diff) ColumnChange(_ *schema.Table, from, to *schema.Column) (schema.ChangeKind, error) {
+ change := sqlx.CommentChange(from.Attrs, to.Attrs)
+ if from.Type.Null != to.Type.Null {
+ change |= schema.ChangeNull
+ }
+ changed, err := d.typeChanged(from, to)
+ if err != nil {
+ return schema.NoChange, err
+ }
+ if changed {
+ change |= schema.ChangeType
+ }
+ if changed := d.defaultChanged(from, to); changed {
+ change |= schema.ChangeDefault
+ }
+ if d.generatedChanged(from, to) {
+ change |= schema.ChangeGenerated
+ }
+ return change, nil
+}
+
+// typeChanged reports if the column type was changed.
+func (d *diff) typeChanged(from, to *schema.Column) (bool, error) {
+ fromT, toT := from.Type.Type, to.Type.Type
+ if fromT == nil || toT == nil {
+ return false, fmt.Errorf("sqlite: missing type information for column %q", from.Name)
+ }
+ // Types are mismatched if they do not have the same "type affinity".
+ return reflect.TypeOf(fromT) != reflect.TypeOf(toT), nil
+}
+
+// defaultChanged reports if the default value of a column was changed.
+func (d *diff) defaultChanged(from, to *schema.Column) bool {
+ d1, ok1 := sqlx.DefaultValue(from)
+ d2, ok2 := sqlx.DefaultValue(to)
+ if ok1 != ok2 {
+ return true
+ }
+ if d1 == d2 {
+ return false
+ }
+ x1, err1 := sqlx.Unquote(d1)
+ x2, err2 := sqlx.Unquote(d2)
+ return err1 != nil || err2 != nil || x1 != x2
+}
+
+// generatedChanged reports if the generated expression of a column was changed.
+func (*diff) generatedChanged(from, to *schema.Column) bool {
+ var (
+ fromX, toX schema.GeneratedExpr
+ fromHas, toHas = sqlx.Has(from.Attrs, &fromX), sqlx.Has(to.Attrs, &toX)
+ )
+ return fromHas != toHas || fromHas && (sqlx.MayWrap(fromX.Expr) != sqlx.MayWrap(toX.Expr) || storedOrVirtual(fromX.Type) != storedOrVirtual(toX.Type))
+}
+
+// IsGeneratedIndexName reports if the index name was generated by the database.
+// See: https://github.com/sqlite/sqlite/blob/e937df8/src/build.c#L3583.
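+//
+// DefaultDiff above can be used without a database connection. A hedged
+// sketch, assuming schema.Differ exposes TableDiff as in the other Atlas
+// drivers:
+//
+// changes, err := sqlite.DefaultDiff.TableDiff(from, to)
+// if err != nil {
+// // handle error
+// }
+// // changes is a []schema.Change describing how to migrate from -> to.
+//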
+func (d *diff) IsGeneratedIndexName(t *schema.Table, idx *schema.Index) bool { + p := fmt.Sprintf("sqlite_autoindex_%s_", t.Name) + if !strings.HasPrefix(idx.Name, p) { + return false + } + i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, p), 10, 64) + return err == nil && i > 0 +} + +// IndexAttrChanged reports if the index attributes were changed. +func (*diff) IndexAttrChanged(from, to []schema.Attr) bool { + var p1, p2 IndexPredicate + return sqlx.Has(from, &p1) != sqlx.Has(to, &p2) || (p1.P != p2.P && p1.P != sqlx.MayWrap(p2.P)) +} + +// IndexPartAttrChanged reports if the index-part attributes were changed. +func (*diff) IndexPartAttrChanged(_, _ *schema.Index, _ int) bool { + return false +} + +// ReferenceChanged reports if the foreign key referential action was changed. +func (*diff) ReferenceChanged(from, to schema.ReferenceOption) bool { + // According to SQLite, if an action is not explicitly + // specified, it defaults to "NO ACTION". + if from == "" { + from = schema.NoAction + } + if to == "" { + to = schema.NoAction + } + return from != to +} + +// Normalize implements the sqlx.Normalizer interface. +func (d *diff) Normalize(from, to *schema.Table) error { + used := make([]bool, len(to.ForeignKeys)) + // In SQLite, there is no easy way to get the foreign-key constraint + // name, except for parsing the CREATE statement. Therefore, we check + // if there is a foreign-key with identical properties. + for _, fk1 := range from.ForeignKeys { + for i, fk2 := range to.ForeignKeys { + if used[i] { + continue + } + if fk2.Symbol == fk1.Symbol && !isNumber(fk1.Symbol) || sameFK(fk1, fk2) { + fk1.Symbol = fk2.Symbol + used[i] = true + } + } + } + return nil +} + +func sameFK(fk1, fk2 *schema.ForeignKey) bool { + if fk1.Table.Name != fk2.Table.Name || fk1.RefTable.Name != fk2.RefTable.Name || + len(fk1.Columns) != len(fk2.Columns) || len(fk1.RefColumns) != len(fk2.RefColumns) { + return false + } + for i, c1 := range fk1.Columns { + if c1.Name != fk2.Columns[i].Name { + return false + } + } + for i, c1 := range fk1.RefColumns { + if c1.Name != fk2.RefColumns[i].Name { + return false + } + } + return true +} diff --git a/vendor/ariga.io/atlas/sql/sqlite/driver.go b/vendor/ariga.io/atlas/sql/sqlite/driver.go new file mode 100644 index 00000000..3a944b3b --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlite/driver.go @@ -0,0 +1,324 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlite + +import ( + "context" + "database/sql" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlclient" +) + +type ( + // Driver represents a SQLite driver for introspecting database schemas, + // generating diff between schema elements and apply migrations changes. + Driver struct { + conn + schema.Differ + schema.Inspector + migrate.PlanApplier + } + + // database connection and its information. + conn struct { + schema.ExecQuerier + // System variables that are set on `Open`. + version string + collations []string + } +) + +// DriverName holds the name used for registration. 
+const DriverName = "sqlite3" + +func init() { + sqlclient.Register( + DriverName, + sqlclient.DriverOpener(Open), + sqlclient.RegisterTxOpener(OpenTx), + sqlclient.RegisterCodec(MarshalHCL, EvalHCL), + sqlclient.RegisterFlavours("sqlite"), + sqlclient.RegisterURLParser(sqlclient.URLParserFunc(func(u *url.URL) *sqlclient.URL { + uc := &sqlclient.URL{URL: u, DSN: strings.TrimPrefix(u.String(), u.Scheme+"://"), Schema: mainFile} + if mode := u.Query().Get("mode"); mode == "memory" { + // The "file:" prefix is mandatory for memory modes. + uc.DSN = "file:" + uc.DSN + } + return uc + })), + ) +} + +// Open opens a new SQLite driver. +func Open(db schema.ExecQuerier) (migrate.Driver, error) { + var ( + c = conn{ExecQuerier: db} + ctx = context.Background() + ) + rows, err := db.QueryContext(ctx, "SELECT sqlite_version()") + if err != nil { + return nil, fmt.Errorf("sqlite: query version pragma: %w", err) + } + if err := sqlx.ScanOne(rows, &c.version); err != nil { + return nil, fmt.Errorf("sqlite: scan version pragma: %w", err) + } + if rows, err = db.QueryContext(ctx, "SELECT name FROM pragma_collation_list()"); err != nil { + return nil, fmt.Errorf("sqlite: query collation_list pragma: %w", err) + } + if c.collations, err = sqlx.ScanStrings(rows); err != nil { + return nil, fmt.Errorf("sqlite: scanning database collations: %w", err) + } + return &Driver{ + conn: c, + Differ: &sqlx.Diff{DiffDriver: &diff{}}, + Inspector: &inspect{c}, + PlanApplier: &planApply{c}, + }, nil +} + +// Snapshot implements migrate.Snapshoter. +func (d *Driver) Snapshot(ctx context.Context) (migrate.RestoreFunc, error) { + r, err := d.InspectRealm(ctx, nil) + if err != nil { + return nil, err + } + if !(r == nil || (len(r.Schemas) == 1 && r.Schemas[0].Name == mainFile && len(r.Schemas[0].Tables) == 0)) { + return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q", r.Schemas[0].Tables[0].Name)} + } + return func(ctx context.Context) error { + for _, stmt := range []string{ + "PRAGMA writable_schema = 1;", + "DELETE FROM sqlite_master WHERE type IN ('table', 'index', 'trigger');", + "PRAGMA writable_schema = 0;", + "VACUUM;", + } { + if _, err := d.ExecContext(ctx, stmt); err != nil { + return err + } + } + return nil + }, nil +} + +// CheckClean implements migrate.CleanChecker. +func (d *Driver) CheckClean(ctx context.Context, revT *migrate.TableIdent) error { + r, err := d.InspectRealm(ctx, nil) + if err != nil { + return err + } + switch n := len(r.Schemas); { + case n > 1: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found multiple schemas: %d", len(r.Schemas))} + case n == 1 && r.Schemas[0].Name != mainFile: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found schema %q", r.Schemas[0].Name)} + case n == 1 && len(r.Schemas[0].Tables) > 1: + return &migrate.NotCleanError{Reason: fmt.Sprintf("found multiple tables: %d", len(r.Schemas[0].Tables))} + case n == 1 && len(r.Schemas[0].Tables) == 1 && (revT == nil || r.Schemas[0].Tables[0].Name != revT.Name): + return &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q", r.Schemas[0].Tables[0].Name)} + } + return nil +} + +// Lock implements the schema.Locker interface. 
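+//
+// With the registration in init above, a client can be opened through
+// sqlclient using either the "sqlite3" name or the "sqlite" flavour.
+// Illustrative sketch (the file name is hypothetical; see
+// https://atlasgo.io/url for the URL format):
+//
+// client, err := sqlclient.Open(ctx, "sqlite://app.db")
+// if err != nil {
+// // handle error
+// }
+// defer client.Close()
+//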
+func (d *Driver) Lock(_ context.Context, name string, timeout time.Duration) (schema.UnlockFunc, error) { + path := filepath.Join(os.TempDir(), name+".lock") + c, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return acquireLock(path, timeout) + } + if err != nil { + return nil, fmt.Errorf("sql/sqlite: reading lock dir: %w", err) + } + expires, err := strconv.ParseInt(string(c), 10, 64) + if err != nil { + return nil, fmt.Errorf("sql/sqlite: invalid lock file format: parsing expiration date: %w", err) + } + if time.Unix(0, expires).After(time.Now()) { + // Lock is still valid. + return nil, fmt.Errorf("sql/sqlite: lock on %q already taken", name) + } + return acquireLock(path, timeout) +} + +// Version returns the version of the connected database. +func (d *Driver) Version() string { + return d.conn.version +} + +func acquireLock(path string, timeout time.Duration) (schema.UnlockFunc, error) { + lock, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("sql/sqlite: creating lockfile %q: %w", path, err) + } + if _, err := lock.Write([]byte(strconv.FormatInt(time.Now().Add(timeout).UnixNano(), 10))); err != nil { + return nil, fmt.Errorf("sql/sqlite: writing to lockfile %q: %w", path, err) + } + defer lock.Close() + return func() error { return os.Remove(path) }, nil +} + +type violation struct { + tbl, ref string + row, index int +} + +// OpenTx opens a transaction. If foreign keys are enabled, it disables them, checks for constraint violations, +// opens the transaction and before committing ensures no new violations have been introduced by whatever Atlas was +// doing. +func OpenTx(ctx context.Context, db *sql.DB, opts *sql.TxOptions) (*sqlclient.Tx, error) { + var on sql.NullBool + if err := db.QueryRowContext(ctx, "PRAGMA foreign_keys").Scan(&on); err != nil { + return nil, fmt.Errorf("sql/sqlite: querying 'foreign_keys' pragma: %w", err) + } + // Disable the foreign_keys pragma in case it is enabled, and + // toggle it back after transaction is committed or rolled back. + if on.Bool { + _, err := db.ExecContext(ctx, "PRAGMA foreign_keys = off") + if err != nil { + return nil, fmt.Errorf("sql/sqlite: set 'foreign_keys = off': %w", err) + } + } + tx, err := db.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + cm, err := CommitFunc(ctx, db, tx, on.Bool) + if err != nil { + return nil, err + } + return &sqlclient.Tx{ + Tx: tx, + CommitFn: cm, + RollbackFn: RollbackFunc(ctx, db, tx, on.Bool), + }, nil +} + +// Tx wraps schema.ExecQuerier with the transaction methods. +type Tx interface { + schema.ExecQuerier + Commit() error + Rollback() error +} + +// CommitFunc takes a transaction and ensures to toggle foreign keys back on after tx.Commit is called. 
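+//
+// OpenTx is registered as the TxOpener in init above, so transactions opened
+// through sqlclient get this foreign-keys handling automatically. Direct use
+// is also possible (sketch; db is an open *sql.DB):
+//
+// tx, err := sqlite.OpenTx(ctx, db, nil)
+// if err != nil {
+// // handle error
+// }
+// // ... run statements on tx ...
+// err = tx.Commit() // re-checks foreign keys and restores the pragma
+//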
+func CommitFunc(ctx context.Context, db schema.ExecQuerier, tx Tx, on bool) (func() error, error) { + var ( + before []violation + err error + ) + if on { + before, err = violations(ctx, tx) + if err != nil { + return nil, err + } + } + return func() error { + if on { + after, err := violations(ctx, tx) + if err != nil { + if err2 := tx.Rollback(); err2 != nil { + err = fmt.Errorf("%v: %w", err2, err) + } + return enableFK(ctx, db, on, err) + } + if vs := violationsDiff(before, after); len(vs) > 0 { + err := fmt.Errorf("sql/sqlite: foreign key mismatch: %+v", vs) + if err2 := tx.Rollback(); err2 != nil { + err = fmt.Errorf("%v: %w", err2, err) + } + return enableFK(ctx, db, on, err) + } + } + return enableFK(ctx, db, on, tx.Commit()) + }, nil +} + +// RollbackFunc takes a transaction and ensures to toggle foreign keys back on after tx.Rollback is called. +func RollbackFunc(ctx context.Context, db schema.ExecQuerier, tx Tx, on bool) func() error { + return func() error { + return enableFK(ctx, db, on, tx.Rollback()) + } +} + +func enableFK(ctx context.Context, db schema.ExecQuerier, do bool, err error) error { + if do { + // Re-enable foreign key checks if they were enabled before. + if _, err2 := db.ExecContext(ctx, "PRAGMA foreign_keys = on"); err2 != nil { + err2 = fmt.Errorf("sql/sqlite: set 'foreign_keys = on': %w", err2) + if err != nil { + return fmt.Errorf("%v: %w", err2, err) + } + return err2 + } + } + return err +} + +func violations(ctx context.Context, conn schema.ExecQuerier) ([]violation, error) { + rows, err := conn.QueryContext(ctx, "PRAGMA foreign_key_check") + if err != nil { + return nil, fmt.Errorf("sql/sqlite: querying 'foreign_key_check' pragma: %w", err) + } + defer rows.Close() + var vs []violation + for rows.Next() { + var v violation + if err := rows.Scan(&v.tbl, &v.row, &v.ref, &v.index); err != nil { + return nil, fmt.Errorf("sql/sqlite: querying 'foreign_key_check' pragma: scanning rows: %w", err) + } + vs = append(vs, v) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("sql/sqlite: querying 'foreign_key_check' pragma: scanning rows: %w", err) + } + return vs, nil +} + +// equalViolations compares the foreign key violations before starting a transaction with the ones afterwards. +// It returns violations found in v2 that are not in v1. +func violationsDiff(v1, v2 []violation) (vs []violation) { + for _, v := range v2 { + if !contains(v1, v) { + vs = append(vs, v) + } + } + return vs +} + +func contains(hs []violation, n violation) bool { + for _, v := range hs { + if v.row == n.row && v.ref == n.ref && v.index == n.index && v.tbl == n.tbl { + return true + } + } + return false +} + +// SQLite standard data types as defined in its codebase and documentation. +// https://www.sqlite.org/datatype3.html +// https://github.com/sqlite/sqlite/blob/master/src/global.c +const ( + TypeInteger = "integer" // SQLITE_TYPE_INTEGER + TypeReal = "real" // SQLITE_TYPE_REAL + TypeText = "text" // SQLITE_TYPE_TEXT + TypeBlob = "blob" // SQLITE_TYPE_BLOB +) + +// SQLite generated columns types. +const ( + virtual = "VIRTUAL" + stored = "STORED" +) diff --git a/vendor/ariga.io/atlas/sql/sqlite/inspect.go b/vendor/ariga.io/atlas/sql/sqlite/inspect.go new file mode 100644 index 00000000..d2328f8b --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlite/inspect.go @@ -0,0 +1,727 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlite + +import ( + "context" + "database/sql" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + + "ariga.io/atlas/sql/internal/sqlx" + "ariga.io/atlas/sql/schema" +) + +// A diff provides an SQLite implementation for schema.Inspector. +type inspect struct{ conn } + +var _ schema.Inspector = (*inspect)(nil) + +// InspectRealm returns schema descriptions of all resources in the given realm. +func (i *inspect) InspectRealm(ctx context.Context, opts *schema.InspectRealmOption) (*schema.Realm, error) { + schemas, err := i.databases(ctx, opts) + if err != nil { + return nil, err + } + if len(schemas) > 1 { + return nil, fmt.Errorf("sqlite: multiple database files are not supported by the driver. got: %d", len(schemas)) + } + if opts == nil { + opts = &schema.InspectRealmOption{} + } + r := schema.NewRealm(schemas...) + if !sqlx.ModeInspectRealm(opts).Is(schema.InspectTables) { + return sqlx.ExcludeRealm(r, opts.Exclude) + } + for _, s := range schemas { + tables, err := i.tables(ctx, nil) + if err != nil { + return nil, err + } + s.AddTables(tables...) + for _, t := range tables { + if err := i.inspectTable(ctx, t); err != nil { + return nil, err + } + } + } + sqlx.LinkSchemaTables(r.Schemas) + return sqlx.ExcludeRealm(r, opts.Exclude) +} + +// InspectSchema returns schema descriptions of the tables in the given schema. +// If the schema name is empty, the "main" database is used. +func (i *inspect) InspectSchema(ctx context.Context, name string, opts *schema.InspectOptions) (*schema.Schema, error) { + if name == "" { + name = mainFile + } + schemas, err := i.databases(ctx, &schema.InspectRealmOption{ + Schemas: []string{name}, + }) + if err != nil { + return nil, err + } + if len(schemas) == 0 { + return nil, &schema.NotExistError{ + Err: fmt.Errorf("sqlite: schema %q was not found", name), + } + } + if opts == nil { + opts = &schema.InspectOptions{} + } + r := schema.NewRealm(schemas...) + if !sqlx.ModeInspectSchema(opts).Is(schema.InspectTables) { + return sqlx.ExcludeSchema(r.Schemas[0], opts.Exclude) + } + tables, err := i.tables(ctx, opts) + if err != nil { + return nil, err + } + r.Schemas[0].AddTables(tables...) + for _, t := range tables { + if err := i.inspectTable(ctx, t); err != nil { + return nil, err + } + } + sqlx.LinkSchemaTables(schemas) + return sqlx.ExcludeSchema(r.Schemas[0], opts.Exclude) +} + +func (i *inspect) inspectTable(ctx context.Context, t *schema.Table) error { + if err := i.columns(ctx, t); err != nil { + return err + } + if err := i.indexes(ctx, t); err != nil { + return err + } + if err := i.fks(ctx, t); err != nil { + return err + } + return fillChecks(t) +} + +// columns queries and appends the columns of the given table. +func (i *inspect) columns(ctx context.Context, t *schema.Table) error { + rows, err := i.QueryContext(ctx, fmt.Sprintf(columnsQuery, t.Name)) + if err != nil { + return fmt.Errorf("sqlite: querying %q columns: %w", t.Name, err) + } + defer rows.Close() + for rows.Next() { + if err := i.addColumn(t, rows); err != nil { + return fmt.Errorf("sqlite: %w", err) + } + } + return autoinc(t) +} + +// addColumn scans the current row and adds a new column from it to the table. 
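+//
+// Inspection is typically reached through the Driver (see driver.go), which
+// embeds this inspector. An illustrative sketch; an empty schema name
+// resolves to the "main" database:
+//
+// drv, err := sqlite.Open(db) // db implements schema.ExecQuerier
+// if err != nil {
+// // handle error
+// }
+// s, err := drv.InspectSchema(ctx, "", nil)
+// // s.Tables now holds the inspected table definitions.
+//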
+func (i *inspect) addColumn(t *schema.Table, rows *sql.Rows) error { + var ( + nullable, primary bool + hidden sql.NullInt64 + name, typ, defaults sql.NullString + err error + ) + if err = rows.Scan(&name, &typ, &nullable, &defaults, &primary, &hidden); err != nil { + return err + } + c := &schema.Column{ + Name: name.String, + Type: &schema.ColumnType{ + Raw: typ.String, + Null: nullable, + }, + } + c.Type.Type, err = ParseType(typ.String) + if err != nil { + return err + } + if defaults.Valid { + c.Default = defaultExpr(defaults.String) + } + // The hidden flag is set to 2 for VIRTUAL columns, and to + // 3 for STORED columns. See: sqlite/pragma.c#sqlite3Pragma. + if hidden.Int64 >= 2 { + if err := setGenExpr(t, c, hidden.Int64); err != nil { + return err + } + } + t.Columns = append(t.Columns, c) + if primary { + if t.PrimaryKey == nil { + t.PrimaryKey = &schema.Index{ + Name: "PRIMARY", + Unique: true, + Table: t, + } + } + // Columns are ordered by the `pk` field. + t.PrimaryKey.Parts = append(t.PrimaryKey.Parts, &schema.IndexPart{ + C: c, + SeqNo: len(t.PrimaryKey.Parts) + 1, + }) + } + return nil +} + +// indexes queries and appends the indexes of the given table. +func (i *inspect) indexes(ctx context.Context, t *schema.Table) error { + rows, err := i.QueryContext(ctx, fmt.Sprintf(indexesQuery, t.Name)) + if err != nil { + return fmt.Errorf("sqlite: querying %q indexes: %w", t.Name, err) + } + if err := i.addIndexes(t, rows); err != nil { + return fmt.Errorf("sqlite: scan %q indexes: %w", t.Name, err) + } + for _, idx := range t.Indexes { + if err := i.indexInfo(ctx, t, idx); err != nil { + return err + } + } + return nil +} + +// addIndexes scans the rows and adds the indexes to the table. +func (i *inspect) addIndexes(t *schema.Table, rows *sql.Rows) error { + defer rows.Close() + for rows.Next() { + var ( + uniq, partial bool + name, origin, stmt sql.NullString + ) + if err := rows.Scan(&name, &uniq, &origin, &partial, &stmt); err != nil { + return err + } + if origin.String == "pk" { + continue + } + idx := &schema.Index{ + Name: name.String, + Unique: uniq, + Table: t, + Attrs: []schema.Attr{ + &CreateStmt{S: stmt.String}, + &IndexOrigin{O: origin.String}, + }, + } + if partial { + i := strings.Index(stmt.String, "WHERE") + if i == -1 { + return fmt.Errorf("missing partial WHERE clause in: %s", stmt.String) + } + idx.Attrs = append(idx.Attrs, &IndexPredicate{ + P: strings.TrimSpace(stmt.String[i+5:]), + }) + } + t.Indexes = append(t.Indexes, idx) + } + return nil +} + +var ( + // A regexp to extract index parts. + reIdxParts = regexp.MustCompile("(?i)ON\\s+[\"`]*(?:\\w+)[\"`]*\\s*\\((.+?)\\)(\\s*WHERE\\s+.+)?$") + reIdxDesc = regexp.MustCompile("(?i)\\s+DESC\\s*$") +) + +func (i *inspect) indexInfo(ctx context.Context, t *schema.Table, idx *schema.Index) error { + var ( + hasExpr bool + rows, err = i.QueryContext(ctx, fmt.Sprintf(indexColumnsQuery, idx.Name)) + ) + if err != nil { + return fmt.Errorf("sqlite: querying %q indexes: %w", t.Name, err) + } + defer rows.Close() + for rows.Next() { + var ( + desc sql.NullBool + name sql.NullString + ) + if err := rows.Scan(&name, &desc); err != nil { + return fmt.Errorf("sqlite: scanning index names: %w", err) + } + part := &schema.IndexPart{ + SeqNo: len(idx.Parts) + 1, + Desc: desc.Bool, + } + switch c, ok := t.Column(name.String); { + case ok: + part.C = c + // NULL name indicates that the index-part is an expression and we + // should extract it from the `CREATE INDEX` statement (not supported atm). 
+ case !sqlx.ValidString(name):
+ hasExpr = true
+ part.X = &schema.RawExpr{X: ""}
+ default:
+ return fmt.Errorf("sqlite: column %q was not found for index %q", name.String, idx.Name)
+ }
+ idx.Parts = append(idx.Parts, part)
+ }
+ if !hasExpr {
+ return nil
+ }
+ var c CreateStmt
+ if !sqlx.Has(idx.Attrs, &c) || !reIdxParts.MatchString(c.S) {
+ return nil
+ }
+ x := reIdxParts.FindStringSubmatch(c.S)[1]
+ for _, p := range idx.Parts {
+ j := sqlx.ExprLastIndex(x)
+ // Unable to parse index parts correctly.
+ if j == -1 {
+ return nil
+ }
+ if p.X != nil {
+ // Remove any extra spaces and the "DESC" clause
+ // in case the key-part is descending.
+ kx := strings.TrimSpace(x[:j+1])
+ if p.Desc {
+ kx = reIdxDesc.ReplaceAllString(kx, "")
+ }
+ p.X.(*schema.RawExpr).X = kx
+ }
+ x = strings.TrimLeft(x[j+1:], ", ")
+ }
+ return nil
+}
+
+// fks queries and appends the foreign-keys of the given table.
+func (i *inspect) fks(ctx context.Context, t *schema.Table) error {
+ rows, err := i.QueryContext(ctx, fmt.Sprintf(fksQuery, t.Name))
+ if err != nil {
+ return fmt.Errorf("sqlite: querying %q foreign-keys: %w", t.Name, err)
+ }
+ if err := i.addFKs(t, rows); err != nil {
+ return fmt.Errorf("sqlite: scan %q foreign-keys: %w", t.Name, err)
+ }
+ return fillConstName(t)
+}
+
+func (i *inspect) addFKs(t *schema.Table, rows *sql.Rows) error {
+ ids := make(map[int]*schema.ForeignKey)
+ for rows.Next() {
+ var (
+ id int
+ column, refColumn, refTable, updateRule, deleteRule string
+ )
+ if err := rows.Scan(&id, &column, &refColumn, &refTable, &updateRule, &deleteRule); err != nil {
+ return err
+ }
+ fk, ok := ids[id]
+ if !ok {
+ fk = &schema.ForeignKey{
+ Symbol: strconv.Itoa(id),
+ Table: t,
+ RefTable: t,
+ OnDelete: schema.ReferenceOption(deleteRule),
+ OnUpdate: schema.ReferenceOption(updateRule),
+ }
+ if refTable != t.Name {
+ fk.RefTable = &schema.Table{Name: refTable, Schema: &schema.Schema{Name: t.Schema.Name}}
+ }
+ ids[id] = fk
+ t.ForeignKeys = append(t.ForeignKeys, fk)
+ }
+ c, ok := t.Column(column)
+ if !ok {
+ return fmt.Errorf("column %q was not found for fk %q", column, fk.Symbol)
+ }
+ // Rows are ordered by SEQ that specifies the
+ // position of the column in the FK definition.
+ if _, ok := fk.Column(c.Name); !ok {
+ fk.Columns = append(fk.Columns, c)
+ c.ForeignKeys = append(c.ForeignKeys, fk)
+ }
+
+ // Stub referenced columns or link if it is a self-reference.
+ var rc *schema.Column
+ if fk.Table != fk.RefTable {
+ rc = &schema.Column{Name: refColumn}
+ } else if c, ok := t.Column(refColumn); ok {
+ rc = c
+ } else {
+ return fmt.Errorf("referenced column %q was not found for fk %q", refColumn, fk.Symbol)
+ }
+ if _, ok := fk.RefColumn(rc.Name); !ok {
+ fk.RefColumns = append(fk.RefColumns, rc)
+ }
+ }
+ return nil
+}
+
+// tables returns a list of all tables that exist in the schema.
+func (i *inspect) tables(ctx context.Context, opts *schema.InspectOptions) ([]*schema.Table, error) {
+ var (
+ args []any
+ query = tablesQuery
+ )
+ if opts != nil && len(opts.Tables) > 0 {
+ query += " AND name IN (" + strings.Repeat("?, ", len(opts.Tables)-1) + "?)"
+ for _, s := range opts.Tables {
+ args = append(args, s)
+ }
+ }
+ rows, err := i.QueryContext(ctx, query, args...) 
+ if err != nil { + return nil, fmt.Errorf("sqlite: querying schema tables: %w", err) + } + defer rows.Close() + var tables []*schema.Table + for rows.Next() { + var name, stmt string + if err := rows.Scan(&name, &stmt); err != nil { + return nil, fmt.Errorf("sqlite: scanning table: %w", err) + } + stmt = strings.TrimSpace(stmt) + t := &schema.Table{ + Name: name, + Attrs: []schema.Attr{ + &CreateStmt{S: strings.TrimSpace(stmt)}, + }, + } + if strings.HasSuffix(stmt, "WITHOUT ROWID") || strings.HasSuffix(stmt, "without rowid") { + t.Attrs = append(t.Attrs, &WithoutRowID{}) + } + tables = append(tables, t) + } + return tables, nil +} + +// schemas returns the list of the schemas in the database. +func (i *inspect) databases(ctx context.Context, opts *schema.InspectRealmOption) ([]*schema.Schema, error) { + var ( + args []any + query = databasesQuery + ) + if opts != nil && len(opts.Schemas) > 0 { + query = fmt.Sprintf(databasesQueryArgs, strings.Repeat("?, ", len(opts.Schemas)-1)+"?") + for _, s := range opts.Schemas { + args = append(args, s) + } + } + rows, err := i.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("sqlite: querying schemas: %w", err) + } + defer rows.Close() + var schemas []*schema.Schema + for rows.Next() { + var name, file sql.NullString + if err := rows.Scan(&name, &file); err != nil { + return nil, err + } + // File is missing if the database is not + // associated with a file (:memory: mode). + if file.String == "" { + file.String = ":memory:" + } + schemas = append(schemas, &schema.Schema{ + Name: name.String, + Attrs: []schema.Attr{&File{Name: file.String}}, + }) + } + return schemas, nil +} + +type ( + // File describes a database file. + File struct { + schema.Attr + Name string + } + + // CreateStmt describes the SQL statement used to create a resource. + CreateStmt struct { + schema.Attr + S string + } + + // AutoIncrement describes the `AUTOINCREMENT` configuration. + // https://www.sqlite.org/autoinc.html + AutoIncrement struct { + schema.Attr + // Seq represents the value in sqlite_sequence table. + // i.e. https://www.sqlite.org/fileformat2.html#seqtab. + // + // Setting this value manually to > 0 indicates that + // a custom value is necessary and should be handled + // on migrate. + Seq int64 + } + + // WithoutRowID describes the `WITHOUT ROWID` configuration. + // See: https://sqlite.org/withoutrowid.html + WithoutRowID struct { + schema.Attr + } + + // IndexPredicate describes a partial index predicate. + // See: https://www.sqlite.org/partialindex.html + IndexPredicate struct { + schema.Attr + P string + } + + // IndexOrigin describes how the index was created. + // See: https://www.sqlite.org/pragma.html#pragma_index_list + IndexOrigin struct { + schema.Attr + O string + } + + // A UUIDType defines a UUID type. + UUIDType struct { + schema.Type + T string + } +) + +func columnParts(t string) []string { + t = strings.TrimSpace(strings.ToLower(t)) + parts := strings.FieldsFunc(t, func(r rune) bool { + return r == '(' || r == ')' || r == ' ' || r == ',' + }) + for k := 0; k < 2; k++ { + // Join the type back if it was separated with space (e.g. 'varying character'). + if len(parts) > 1 && !isNumber(parts[0]) && !isNumber(parts[1]) { + parts[1] = parts[0] + " " + parts[1] + parts = parts[1:] + } + } + return parts +} + +func defaultExpr(x string) schema.Expr { + switch { + // Literals definition. 
+ // https://www.sqlite.org/syntax/literal-value.html + case sqlx.IsLiteralBool(x), sqlx.IsLiteralNumber(x), sqlx.IsQuoted(x, '"', '\''), isBlob(x): + return &schema.Literal{V: x} + default: + // We wrap the CURRENT_TIMESTAMP literals in raw-expressions + // as they are not parsable in most decoders. + return &schema.RawExpr{X: x} + } +} + +// isNumber reports whether the string is a number (category N). +func isNumber(s string) bool { + for _, r := range s { + if !unicode.IsNumber(r) { + return false + } + } + return true +} + +// blob literals are hex strings preceded by 'x' (or 'X). +func isBlob(s string) bool { + if (strings.HasPrefix(s, "x'") || strings.HasPrefix(s, "X'")) && strings.HasSuffix(s, "'") { + _, err := strconv.ParseUint(s[2:len(s)-1], 16, 64) + return err == nil + } + return false +} + +var reAutoinc = regexp.MustCompile("(?i)(?:[(,]\\s*)[\"`]?(\\w+)[\"`]?\\s+INTEGER\\s+[^,]*PRIMARY\\s+KEY\\s+[^,]*AUTOINCREMENT") + +// autoinc checks if the table contains a "PRIMARY KEY AUTOINCREMENT" on its +// CREATE statement, according to https://www.sqlite.org/syntax/column-constraint.html. +// This is a workaround until we will embed a proper SQLite parser in atlas. +func autoinc(t *schema.Table) error { + var c CreateStmt + if !sqlx.Has(t.Attrs, &c) { + return fmt.Errorf("missing CREATE statement for table: %q", t.Name) + } + if t.PrimaryKey == nil || len(t.PrimaryKey.Parts) != 1 || t.PrimaryKey.Parts[0].C == nil { + return nil + } + matches := reAutoinc.FindStringSubmatch(c.S) + if len(matches) != 2 { + return nil + } + pkc, ok := t.Column(matches[1]) + if !ok { + return fmt.Errorf("sqlite: column %q was not found for AUTOINCREMENT", matches[1]) + } + if t.PrimaryKey == nil || len(t.PrimaryKey.Parts) != 1 || t.PrimaryKey.Parts[0].C != pkc { + return fmt.Errorf("sqlite: unexpected primary key: %v", t.PrimaryKey) + } + inc := &AutoIncrement{} + // Annotate table elements with "AUTOINCREMENT". + t.PrimaryKey.Attrs = append(t.PrimaryKey.Attrs, inc) + pkc.Attrs = append(pkc.Attrs, inc) + return nil +} + +// setGenExpr extracts the generated expression from the CREATE statement +// and appends it to the column. +func setGenExpr(t *schema.Table, c *schema.Column, f int64) error { + var s CreateStmt + if !sqlx.Has(t.Attrs, &s) { + return fmt.Errorf("missing CREATE statement for table: %q", t.Name) + } + re, err := regexp.Compile(fmt.Sprintf("(?:[(,]\\s*)[\"`]*(%s)[\"`]*[^,]*(?i:GENERATED\\s+ALWAYS)*\\s*(?i:AS){1}\\s*\\(", c.Name)) + if err != nil { + return err + } + idx := re.FindAllStringIndex(s.S, 1) + if len(idx) != 1 || len(idx[0]) != 2 { + return fmt.Errorf("sqlite: generation expression for column %q was not found in create statement", c.Name) + } + expr := scanExpr(s.S[idx[0][1]-1:]) + if expr == "" { + return fmt.Errorf("sqlite: unexpected empty generation expression for column %q", c.Name) + } + typ := virtual + if f == 3 { + typ = stored + } + c.SetGeneratedExpr(&schema.GeneratedExpr{Expr: expr, Type: typ}) + return nil +} + +// The following regexes extract named FKs and CHECK constraints defined in table-constraints or inlined +// as column-constraints. Note, we assume the SQL statements are valid as they are returned by SQLite. 
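+// For intuition, hypothetical constraint snippets these patterns recognize:
+//
+// c1 int CONSTRAINT fk_c1 REFERENCES t2 (id) -- inlined column constraint (reFKC)
+// CONSTRAINT fk_c2 FOREIGN KEY (c2) REFERENCES t2 (id) -- table constraint (reFKT)
+// CONSTRAINT positive CHECK (c2 > 0) -- named CHECK constraint (reCheck)
+//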
+var ( + reFKC = regexp.MustCompile("(?i)(?:[(,]\\s*)[\"`]*(\\w+)[\"`]*[^,]*\\s+CONSTRAINT\\s+[\"`]*(\\w+)[\"`]*\\s+REFERENCES\\s+[\"`]*(\\w+)[\"`]*\\s*\\(([,\"` \\w]+)\\)") + reFKT = regexp.MustCompile("(?i)CONSTRAINT\\s+[\"`]*(\\w+)[\"`]*\\s+FOREIGN\\s+KEY\\s*\\(([,\"` \\w]+)\\)\\s+REFERENCES\\s+[\"`]*(\\w+)[\"`]*\\s*\\(([,\"` \\w]+)\\)") + reCheck = regexp.MustCompile("(?i)(?:CONSTRAINT\\s+[\"`]?(\\w+)[\"`]?\\s+)?CHECK\\s*\\(") +) + +// fillConstName fills foreign-key constrain names from CREATE TABLE statement. +func fillConstName(t *schema.Table) error { + var c CreateStmt + if !sqlx.Has(t.Attrs, &c) { + return fmt.Errorf("missing CREATE statement for table: %q", t.Name) + } + // Loop over table constraints. + for _, m := range reFKT.FindAllStringSubmatch(c.S, -1) { + if len(m) != 5 { + return fmt.Errorf("unexpected number of matches for a table constraint: %q", m) + } + // Pattern matches "constraint_name", "columns", "ref_table" and "ref_columns". + for _, fk := range t.ForeignKeys { + // Found a foreign-key match for the constraint. + if matchFK(fk, columns(m[2]), m[3], columns(m[4])) { + fk.Symbol = m[1] + break + } + } + } + // Loop over inlined column constraints. + for _, m := range reFKC.FindAllStringSubmatch(c.S, -1) { + if len(m) != 5 { + return fmt.Errorf("unexpected number of matches for a column constraint: %q", m) + } + // Pattern matches "column", "constraint_name", "ref_table" and "ref_columns". + for _, fk := range t.ForeignKeys { + // Found a foreign-key match for the constraint. + if matchFK(fk, columns(m[1]), m[3], columns(m[4])) { + fk.Symbol = m[2] + break + } + } + } + return nil +} + +// columns from the matched regex above. +func columns(s string) []string { + names := strings.Split(s, ",") + for i := range names { + names[i] = strings.Trim(strings.TrimSpace(names[i]), "`\"") + } + return names +} + +// matchFK reports if the foreign-key matches the given attributes. +func matchFK(fk *schema.ForeignKey, columns []string, refTable string, refColumns []string) bool { + if len(fk.Columns) != len(columns) || fk.RefTable.Name != refTable || len(fk.RefColumns) != len(refColumns) { + return false + } + for i := range columns { + if fk.Columns[i].Name != columns[i] { + return false + } + } + for i := range refColumns { + if fk.RefColumns[i].Name != refColumns[i] { + return false + } + } + return true +} + +// fillChecks extracts the CHECK constrains from the CREATE TABLE statement, +// and appends them to the table attributes. +func fillChecks(t *schema.Table) error { + var c CreateStmt + if !sqlx.Has(t.Attrs, &c) { + return fmt.Errorf("missing CREATE statement for table: %q", t.Name) + } + for i := 0; i < len(c.S); { + idx := reCheck.FindStringSubmatchIndex(c.S[i:]) + // No more matches. + if len(idx) != 4 { + break + } + check := &schema.Check{Expr: scanExpr(c.S[idx[1]-1:])} + // Matching group for constraint name. + if idx[2] != -1 && idx[3] != -1 { + check.Name = c.S[idx[2]:idx[3]] + } + t.Attrs = append(t.Attrs, check) + c.S = c.S[idx[1]+len(check.Expr)-1:] + } + return nil +} + +// scanExpr scans the expression string (wrapped with parens) +// until its end in the given string. e.g. "(a+1), c int ...". +func scanExpr(expr string) string { + var r, l int + for i := 0; i < len(expr); i++ { + switch expr[i] { + case '(': + r++ + case ')': + l++ + case '\'', '"': + // Skip unescaped strings. + if j := strings.IndexByte(expr[i+1:], expr[i]); j != -1 { + i += j + 1 + } + } + // Balanced parens. 
+
+const (
+	// Name of the main database file.
+	mainFile = "main"
+	// Query to list attached database files.
+	databasesQuery     = "SELECT `name`, `file` FROM pragma_database_list() WHERE `name` <> 'temp'"
+	databasesQueryArgs = "SELECT `name`, `file` FROM pragma_database_list() WHERE `name` IN (%s)"
+	// Query to list database tables.
+	tablesQuery = "SELECT `name`, `sql` FROM sqlite_master WHERE `type` = 'table' AND `name` NOT LIKE 'sqlite_%'"
+	// Query to list table information.
+	columnsQuery = "SELECT `name`, `type`, (not `notnull`) AS `nullable`, `dflt_value`, (`pk` <> 0) AS `pk`, `hidden` FROM pragma_table_xinfo('%s') ORDER BY `cid`"
+	// Query to list table indexes.
+	indexesQuery = "SELECT `il`.`name`, `il`.`unique`, `il`.`origin`, `il`.`partial`, `m`.`sql` FROM pragma_index_list('%s') AS il JOIN sqlite_master AS m ON il.name = m.name"
+	// Query to list index columns.
+	indexColumnsQuery = "SELECT name, desc FROM pragma_index_xinfo('%s') WHERE key = 1 ORDER BY seqno"
+	// Query to list table foreign-keys.
+	fksQuery = "SELECT `id`, `from`, `to`, `table`, `on_update`, `on_delete` FROM pragma_foreign_key_list('%s') ORDER BY id, seq"
+)
diff --git a/vendor/ariga.io/atlas/sql/sqlite/migrate.go b/vendor/ariga.io/atlas/sql/sqlite/migrate.go
new file mode 100644
index 00000000..dc1468f5
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/sqlite/migrate.go
@@ -0,0 +1,581 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sqlite
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/migrate"
+	"ariga.io/atlas/sql/schema"
+)
+
+// DefaultPlan provides basic planning capabilities for SQLite dialects.
+// Note, it is recommended to call Open, create a new Driver and use its
+// migrate.PlanApplier when a database connection is available.
+var DefaultPlan migrate.PlanApplier = &planApply{conn: conn{ExecQuerier: sqlx.NoRows}}
+
+// A planApply provides migration capabilities for schema elements.
+type planApply struct{ conn }
+
+// PlanChanges returns a migration plan for the given schema changes.
+func (p *planApply) PlanChanges(ctx context.Context, name string, changes []schema.Change, opts ...migrate.PlanOption) (*migrate.Plan, error) {
+	s := &state{
+		conn: p.conn,
+		Plan: migrate.Plan{
+			Name:          name,
+			Transactional: true,
+		},
+		PlanOptions: migrate.PlanOptions{
+			// Currently, the driver does not support attached
+			// schemas and assumes the connected schema is "main".
+			SchemaQualifier: new(string),
+		},
+	}
+	for _, o := range opts {
+		o(&s.PlanOptions)
+	}
+	if err := s.plan(ctx, changes); err != nil {
+		return nil, err
+	}
+	if err := sqlx.SetReversible(&s.Plan); err != nil {
+		return nil, err
+	}
+	// Disable foreign-keys enforcement if it is required
+	// by one of the changes in the plan.
+	if s.skipFKs {
+		// Callers should note that these two pragmas are a no-op inside transactions.
+		// See: https://sqlite.org/pragma.html#pragma_foreign_keys.
+		s.Changes = append([]*migrate.Change{{Cmd: "PRAGMA foreign_keys = off", Comment: "disable the enforcement of foreign-keys constraints"}}, s.Changes...)
+		s.append(&migrate.Change{Cmd: "PRAGMA foreign_keys = on", Comment: "enable back the enforcement of foreign-keys constraints"})
+	}
+	return &s.Plan, nil
+}
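+
+// examplePlan is an illustrative sketch (not part of the upstream source) of
+// planning a table drop with the statically-declared DefaultPlan. It relies
+// on the DSL helpers from ariga.io/atlas/sql/schema; the table and plan name
+// are hypothetical.
+func examplePlan(ctx context.Context) error {
+	users := schema.NewTable("users").AddColumns(schema.NewIntColumn("id", "integer"))
+	plan, err := DefaultPlan.PlanChanges(ctx, "drop_users", []schema.Change{&schema.DropTable{T: users}})
+	if err != nil {
+		return err
+	}
+	for _, c := range plan.Changes {
+		fmt.Println(c.Cmd) // e.g. a DROP TABLE statement for `users`
+	}
+	return nil
+}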
+
+// ApplyChanges applies the changes on the database. An error is returned
+// if the driver is unable to produce a plan for them, or if one of the
+// statements fails or is unsupported.
+func (p *planApply) ApplyChanges(ctx context.Context, changes []schema.Change, opts ...migrate.PlanOption) error {
+	return sqlx.ApplyChanges(ctx, changes, p, opts...)
+}
+
+// state represents the state of a single plan computation. It is not part of
+// planApply so that multiple plans can be computed and applied in parallel.
+type state struct {
+	conn
+	migrate.Plan
+	migrate.PlanOptions
+	skipFKs bool
+}
+
+// plan builds the plan statements for the given schema changes. An error is
+// returned if one of the operations fails, or a change is not supported.
+func (s *state) plan(ctx context.Context, changes []schema.Change) (err error) {
+	for _, c := range changes {
+		switch c := c.(type) {
+		case *schema.AddTable:
+			err = s.addTable(ctx, c)
+		case *schema.DropTable:
+			err = s.dropTable(ctx, c)
+		case *schema.ModifyTable:
+			err = s.modifyTable(ctx, c)
+		case *schema.RenameTable:
+			s.renameTable(c)
+		default:
+			err = fmt.Errorf("unsupported change %T", c)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addTable builds and executes the query for creating a table in a schema.
+func (s *state) addTable(ctx context.Context, add *schema.AddTable) error {
+	var (
+		errs []string
+		b    = s.Build("CREATE TABLE").Table(add.T)
+	)
+	if sqlx.Has(add.Extra, &schema.IfNotExists{}) {
+		b.P("IF NOT EXISTS")
+	}
+	b.WrapIndent(func(b *sqlx.Builder) {
+		b.MapIndent(add.T.Columns, func(i int, b *sqlx.Builder) {
+			if err := s.column(b, add.T.Columns[i]); err != nil {
+				errs = append(errs, err.Error())
+			}
+		})
+		// Primary keys with auto-increment are inlined on the column definition.
+		if pk := add.T.PrimaryKey; pk != nil && !autoincPK(pk) {
+			b.Comma().NL().P("PRIMARY KEY")
+			s.indexParts(b, pk.Parts)
+		}
+		if len(add.T.ForeignKeys) > 0 {
+			b.Comma()
+			s.fks(b, add.T.ForeignKeys...)
+		}
+		for _, attr := range add.T.Attrs {
+			if c, ok := attr.(*schema.Check); ok {
+				b.Comma().NL()
+				check(b, c)
+			}
+		}
+	})
+	if len(errs) > 0 {
+		return fmt.Errorf("create table %q: %s", add.T.Name, strings.Join(errs, ", "))
+	}
+	if p := (WithoutRowID{}); sqlx.Has(add.T.Attrs, &p) {
+		b.P("WITHOUT ROWID")
+	}
+	s.append(&migrate.Change{
+		Cmd:     b.String(),
+		Source:  add,
+		Reverse: s.Build("DROP TABLE").Table(add.T).String(),
+		Comment: fmt.Sprintf("create %q table", add.T.Name),
+	})
+	if err := s.tableSeq(ctx, add); err != nil {
+		return err
+	}
+	return s.addIndexes(add.T, add.T.Indexes...)
+}
+
+// dropTable builds and executes the query for dropping a table from a schema.
+func (s *state) dropTable(ctx context.Context, drop *schema.DropTable) error {
+	rs := &state{conn: s.conn, PlanOptions: s.PlanOptions}
+	if err := rs.addTable(ctx, &schema.AddTable{T: drop.T}); err != nil {
+		return fmt.Errorf("calculate reverse for drop table %q: %w", drop.T.Name, err)
+	}
+	s.skipFKs = true
+	b := s.Build("DROP TABLE").Ident(drop.T.Name)
+	if sqlx.Has(drop.Extra, &schema.IfExists{}) {
+		b.P("IF EXISTS")
+	}
+	s.append(&migrate.Change{
+		Cmd:     b.String(),
+		Source:  drop,
+		Comment: fmt.Sprintf("drop %q table", drop.T.Name),
+		// The reverse of 'DROP TABLE' might be a multi-statement
+		// operation, e.g. a table with indexes.
+		Reverse: func() any {
+			cmd := make([]string, len(rs.Changes))
+			for i, c := range rs.Changes {
+				cmd[i] = c.Cmd
+			}
+			if len(cmd) == 1 {
+				return cmd[0]
+			}
+			return cmd
+		}(),
+	})
+	return nil
+}
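+
+// exampleApply is an illustrative sketch (not part of the upstream source) of
+// applying changes over a live connection instead of DefaultPlan. conn stands
+// for any schema.ExecQuerier, e.g. a *sql.DB opened with a SQLite driver (an
+// assumption; the driver setup is not shown here).
+func exampleApply(ctx context.Context, conn schema.ExecQuerier, changes []schema.Change) error {
+	drv, err := Open(conn) // Open is defined in this package's driver.go.
+	if err != nil {
+		return err
+	}
+	// The driver plans the changes and executes the resulting statements.
+	return drv.ApplyChanges(ctx, changes)
+}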
+
+// modifyTable builds and executes the queries for bringing the table into its modified state.
+// If the modification contains changes that are not index creation/deletion or a simple column
+// addition, the changes are applied using a temporary table following the procedure mentioned
+// in: https://www.sqlite.org/lang_altertable.html#making_other_kinds_of_table_schema_changes.
+func (s *state) modifyTable(ctx context.Context, modify *schema.ModifyTable) error {
+	if alterable(modify) {
+		return s.alterTable(modify)
+	}
+	s.skipFKs = true
+	newT := *modify.T
+	indexes := newT.Indexes
+	newT.Indexes = nil
+	newT.Name = "new_" + newT.Name
+	// Create a new table with a temporary name, and copy the existing rows to it.
+	if err := s.addTable(ctx, &schema.AddTable{T: &newT}); err != nil {
+		return err
+	}
+	copied, err := s.copyRows(modify.T, &newT, modify.Changes)
+	if err != nil {
+		return err
+	}
+	// Drop the current table, and rename the new one to its real name.
+	s.append(&migrate.Change{
+		Cmd:    s.Build("DROP TABLE").Ident(modify.T.Name).String(),
+		Source: modify,
+		Comment: fmt.Sprintf("drop %q table %s", modify.T.Name, func() string {
+			if copied {
+				return "after copying rows"
+			}
+			return "without copying rows (no columns)"
+		}()),
+	})
+	s.append(&migrate.Change{
+		Cmd:     s.Build("ALTER TABLE").Ident(newT.Name).P("RENAME TO").Ident(modify.T.Name).String(),
+		Source:  modify,
+		Comment: fmt.Sprintf("rename temporary table %q to %q", newT.Name, modify.T.Name),
+	})
+	return s.addIndexes(modify.T, indexes...)
+}
+
+func (s *state) renameTable(c *schema.RenameTable) {
+	s.append(&migrate.Change{
+		Source:  c,
+		Comment: fmt.Sprintf("rename a table from %q to %q", c.From.Name, c.To.Name),
+		Cmd:     s.Build("ALTER TABLE").Table(c.From).P("RENAME TO").Table(c.To).String(),
+		Reverse: s.Build("ALTER TABLE").Table(c.To).P("RENAME TO").Table(c.From).String(),
+	})
+}
+
+func (s *state) column(b *sqlx.Builder, c *schema.Column) error {
+	t, err := FormatType(c.Type.Type)
+	if err != nil {
+		return err
+	}
+	b.Ident(c.Name).P(t)
+	if !c.Type.Null {
+		b.P("NOT")
+	}
+	b.P("NULL")
+	if c.Default != nil {
+		x, err := defaultValue(c)
+		if err != nil {
+			return err
+		}
+		b.P("DEFAULT", x)
+	}
+	switch hasA, hasX := sqlx.Has(c.Attrs, &AutoIncrement{}), sqlx.Has(c.Attrs, &schema.GeneratedExpr{}); {
+	case hasA && hasX:
+		return fmt.Errorf("both autoincrement and generation expression specified for column %q", c.Name)
+	case hasA:
+		b.P("PRIMARY KEY AUTOINCREMENT")
+	case hasX:
+		x := &schema.GeneratedExpr{}
+		sqlx.Has(c.Attrs, x)
+		b.P("AS", sqlx.MayWrap(x.Expr), x.Type)
+	}
+	return nil
+}
+
+func (s *state) dropIndexes(t *schema.Table, indexes ...*schema.Index) error {
+	rs := &state{conn: s.conn}
+	if err := rs.addIndexes(t, indexes...); err != nil {
+		return err
+	}
+	for i := range rs.Changes {
+		s.append(&migrate.Change{
+			Cmd:     rs.Changes[i].Reverse.(string),
+			Reverse: rs.Changes[i].Cmd,
+			Comment: fmt.Sprintf("drop index %q from table: %q", indexes[i].Name, t.Name),
+		})
+	}
+	return nil
+}
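+
+// exampleAlterable is an illustrative sketch (not part of the upstream
+// source) of which modifications take the in-place ALTER TABLE path and
+// which fall back to the copy-and-rename procedure described above. It
+// relies on the schema DSL helpers; the column names are hypothetical.
+func exampleAlterable(t *schema.Table) {
+	add := &schema.ModifyTable{T: t, Changes: []schema.Change{
+		&schema.AddColumn{C: schema.NewNullStringColumn("note", "text")},
+	}}
+	fmt.Println(alterable(add)) // true: a plain column addition is alterable
+	mod := &schema.ModifyTable{T: t, Changes: []schema.Change{
+		&schema.ModifyColumn{Change: schema.ChangeType},
+	}}
+	fmt.Println(alterable(mod)) // false: requires the temporary-table flow
+}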
+
+func (s *state) addIndexes(t *schema.Table, indexes ...*schema.Index) error {
+	for _, idx := range indexes {
+		// PRIMARY KEY or UNIQUE columns automatically create indexes with the generated name.
+		// See: sqlite/build.c#sqlite3CreateIndex. Therefore, we ignore such PKs, but create
+		// the inlined UNIQUE constraints manually with a custom name, because SQLite does not
+		// allow creating indexes with such names manually. Note, this case is possible when
+		// applying a schema that was inspected from the database as-is.
+		if strings.HasPrefix(idx.Name, "sqlite_autoindex") {
+			if i := (IndexOrigin{}); sqlx.Has(idx.Attrs, &i) && i.O == "p" {
+				continue
+			}
+			// Use the following format: <table>_<c1>_...
+			names := make([]string, len(idx.Parts)+1)
+			names[0] = t.Name
+			for i, p := range idx.Parts {
+				if p.C == nil {
+					return fmt.Errorf("unexpected index part %s (%d)", idx.Name, i)
+				}
+				names[i+1] = p.C.Name
+			}
+			idx.Name = strings.Join(names, "_")
+		}
+		b := s.Build("CREATE")
+		if idx.Unique {
+			b.P("UNIQUE")
+		}
+		b.P("INDEX")
+		if idx.Name != "" {
+			b.Ident(idx.Name)
+		}
+		b.P("ON").Ident(t.Name)
+		s.indexParts(b, idx.Parts)
+		if p := (IndexPredicate{}); sqlx.Has(idx.Attrs, &p) {
+			b.P("WHERE").P(p.P)
+		}
+		s.append(&migrate.Change{
+			Cmd:     b.String(),
+			Source:  &schema.AddIndex{I: idx},
+			Reverse: s.Build("DROP INDEX").Ident(idx.Name).String(),
+			Comment: fmt.Sprintf("create index %q to table: %q", idx.Name, t.Name),
+		})
+	}
+	return nil
+}
+
+func (s *state) indexParts(b *sqlx.Builder, parts []*schema.IndexPart) {
+	b.Wrap(func(b *sqlx.Builder) {
+		b.MapComma(parts, func(i int, b *sqlx.Builder) {
+			switch part := parts[i]; {
+			case part.C != nil:
+				b.Ident(part.C.Name)
+			case part.X != nil:
+				b.WriteString(sqlx.MayWrap(part.X.(*schema.RawExpr).X))
+			}
+			if parts[i].Desc {
+				b.P("DESC")
+			}
+		})
+	})
+}
+
+func (s *state) fks(b *sqlx.Builder, fks ...*schema.ForeignKey) {
+	b.MapIndent(fks, func(i int, b *sqlx.Builder) {
+		fk := fks[i]
+		if fk.Symbol != "" {
+			b.P("CONSTRAINT").Ident(fk.Symbol)
+		}
+		b.P("FOREIGN KEY")
+		b.Wrap(func(b *sqlx.Builder) {
+			b.MapComma(fk.Columns, func(i int, b *sqlx.Builder) {
+				b.Ident(fk.Columns[i].Name)
+			})
+		})
+		b.P("REFERENCES").Ident(fk.RefTable.Name)
+		b.Wrap(func(b *sqlx.Builder) {
+			b.MapComma(fk.RefColumns, func(i int, b *sqlx.Builder) {
+				b.Ident(fk.RefColumns[i].Name)
+			})
+		})
+		if fk.OnUpdate != "" {
+			b.P("ON UPDATE", string(fk.OnUpdate))
+		}
+		if fk.OnDelete != "" {
+			b.P("ON DELETE", string(fk.OnDelete))
+		}
+	})
+}
+
+func (s *state) copyRows(from *schema.Table, to *schema.Table, changes []schema.Change) (bool, error) {
+	var fromC, toC []string
+	for _, column := range to.Columns {
+		// Skip generated columns in INSERT as they are computed.
+		if sqlx.Has(column.Attrs, &schema.GeneratedExpr{}) {
+			continue
+		}
+		// Find a change that is associated with this column, if any.
+		var change schema.Change
+		for i := range changes {
+			switch c := changes[i].(type) {
+			case *schema.AddColumn:
+				if c.C.Name != column.Name {
+					break
+				}
+				if change != nil {
+					return false, fmt.Errorf("duplicate changes for column: %q: %T, %T", column.Name, change, c)
+				}
+				change = changes[i]
+			case *schema.ModifyColumn:
+				if c.To.Name != column.Name {
+					break
+				}
+				if change != nil {
+					return false, fmt.Errorf("duplicate changes for column: %q: %T, %T", column.Name, change, c)
+				}
+				change = changes[i]
+			case *schema.DropColumn:
+				if c.C.Name == column.Name {
+					return false, fmt.Errorf("unexpected drop column: %q", column.Name)
+				}
+			}
+		}
+		switch change := change.(type) {
+		// We expect that new columns are added with DEFAULT/GENERATED
+		// values or defined as nullable if the table is not empty.
+		case *schema.AddColumn:
+		// Column modification requires special handling if it was
+		// converted from nullable to non-nullable with a default value.
+		case *schema.ModifyColumn:
+			toC = append(toC, column.Name)
+			if !column.Type.Null && column.Default != nil && change.Change.Is(schema.ChangeNull|schema.ChangeDefault) {
+				x, err := defaultValue(column)
+				if err != nil {
+					return false, err
+				}
+				fromC = append(fromC, fmt.Sprintf("IFNULL(`%[1]s`, %s) AS `%[1]s`", column.Name, x))
+			} else {
+				fromC = append(fromC, column.Name)
+			}
+		// Columns without changes should be transferred as-is.
+		case nil:
+			toC = append(toC, column.Name)
+			fromC = append(fromC, column.Name)
+		}
+	}
+	insert := len(toC) > 0
+	if insert {
+		s.append(&migrate.Change{
+			Cmd: fmt.Sprintf(
+				"INSERT INTO `%s` (%s) SELECT %s FROM `%s`",
+				to.Name, identComma(toC), identComma(fromC), from.Name,
+			),
+			Comment: fmt.Sprintf("copy rows from old table %q to new temporary table %q", from.Name, to.Name),
+		})
+	}
+	return insert, nil
+}
+
+// alterTable alters the table with the given changes, assuming they are "alterable".
+func (s *state) alterTable(modify *schema.ModifyTable) error {
+	for _, change := range modify.Changes {
+		switch change := change.(type) {
+		case *schema.AddIndex:
+			if err := s.addIndexes(modify.T, change.I); err != nil {
+				return err
+			}
+		case *schema.DropIndex:
+			if err := s.dropIndexes(modify.T, change.I); err != nil {
+				return err
+			}
+		case *schema.RenameIndex:
+			if err := s.addIndexes(modify.T, change.To); err != nil {
+				return err
+			}
+			if err := s.dropIndexes(modify.T, change.From); err != nil {
+				return err
+			}
+		case *schema.AddColumn:
+			b := s.Build("ALTER TABLE").Ident(modify.T.Name)
+			r := b.Clone()
+			if err := s.column(b.P("ADD COLUMN"), change.C); err != nil {
+				return err
+			}
+			s.append(&migrate.Change{
+				Source:  change,
+				Cmd:     b.String(),
+				Reverse: r.P("DROP COLUMN").Ident(change.C.Name).String(),
+				Comment: fmt.Sprintf("add column %q to table: %q", change.C.Name, modify.T.Name),
+			})
+		case *schema.RenameColumn:
+			b := s.Build("ALTER TABLE").Ident(modify.T.Name).P("RENAME COLUMN")
+			r := b.Clone()
+			s.append(&migrate.Change{
+				Source:  change,
+				Cmd:     b.Ident(change.From.Name).P("TO").Ident(change.To.Name).String(),
+				Reverse: r.Ident(change.To.Name).P("TO").Ident(change.From.Name).String(),
+				Comment: fmt.Sprintf("rename a column from %q to %q", change.From.Name, change.To.Name),
+			})
+		default:
+			return fmt.Errorf("unexpected change in alter table: %T", change)
+		}
+	}
+	return nil
+}
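+
+// exampleTableSeq is an illustrative sketch (not part of the upstream source)
+// of seeding the AUTOINCREMENT counter on table creation. It relies on the
+// schema DSL helpers; the table name and sequence value are hypothetical.
+func exampleTableSeq() *schema.AddTable {
+	id := schema.NewIntColumn("id", "integer").AddAttrs(&AutoIncrement{Seq: 100})
+	users := schema.NewTable("users").AddColumns(id)
+	users.SetPrimaryKey(schema.NewPrimaryKey(id))
+	// Planning this change emits, besides the CREATE TABLE statement,
+	// roughly the following (see tableSeq below):
+	//	INSERT INTO sqlite_sequence (name, seq) VALUES ("users", 100)
+	return &schema.AddTable{T: users}
+}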
+
+// tableSeq sets the sequence value of the table if it was provided by
+// the user on table creation.
+func (s *state) tableSeq(ctx context.Context, add *schema.AddTable) error {
+	var inc AutoIncrement
+	switch pk := add.T.PrimaryKey; {
+	// Sequence was set on the table attributes.
+	case sqlx.Has(add.T.Attrs, &inc) && inc.Seq > 0:
+	// Sequence was set on the table primary-key (a single column PK).
+	case pk != nil && len(pk.Parts) == 1 && pk.Parts[0].C != nil && sqlx.Has(pk.Parts[0].C.Attrs, &inc) && inc.Seq > 0:
+	default:
+		return nil
+	}
+	// SQLite tracks the AUTOINCREMENT in the "sqlite_sequence" table that is created and initialized automatically
+	// whenever the first "PRIMARY KEY AUTOINCREMENT" is created. However, rows in this table are populated after the
+	// first insertion to the associated table (name, seq). Therefore, we check if the sequence table and the row exist,
+	// and if they do not, we insert a new non-zero sequence for the table.
+	rows, err := s.QueryContext(ctx, "SELECT seq FROM sqlite_sequence WHERE name = ?", add.T.Name)
+	if err != nil || !rows.Next() {
+		s.append(&migrate.Change{
+			Cmd:     fmt.Sprintf("INSERT INTO sqlite_sequence (name, seq) VALUES (%q, %d)", add.T.Name, inc.Seq),
+			Source:  add,
+			Reverse: fmt.Sprintf("UPDATE sqlite_sequence SET seq = 0 WHERE name = %q", add.T.Name),
+			Comment: fmt.Sprintf("set sequence for %q table", add.T.Name),
+		})
+	}
+	if rows != nil {
+		return rows.Close()
+	}
+	return nil
+}
+
+func (s *state) append(c *migrate.Change) {
+	s.Changes = append(s.Changes, c)
+}
+
+func alterable(modify *schema.ModifyTable) bool {
+	for _, change := range modify.Changes {
+		switch change := change.(type) {
+		case *schema.RenameColumn, *schema.RenameIndex, *schema.DropIndex, *schema.AddIndex:
+		case *schema.AddColumn:
+			if len(change.C.Indexes) > 0 || len(change.C.ForeignKeys) > 0 || change.C.Default != nil {
+				return false
+			}
+			// Only VIRTUAL generated columns can be added using ALTER TABLE.
+			if x := (schema.GeneratedExpr{}); sqlx.Has(change.C.Attrs, &x) && storedOrVirtual(x.Type) == stored {
+				return false
+			}
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// check writes the CHECK constraint to the builder.
+func check(b *sqlx.Builder, c *schema.Check) {
+	expr := c.Expr
+	// Expressions should be wrapped with parens.
+	if t := strings.TrimSpace(expr); !strings.HasPrefix(t, "(") || !strings.HasSuffix(t, ")") {
+		expr = "(" + t + ")"
+	}
+	if c.Name != "" {
+		b.P("CONSTRAINT").Ident(c.Name)
+	}
+	b.P("CHECK", expr)
+}
+
+func autoincPK(pk *schema.Index) bool {
+	return sqlx.Has(pk.Attrs, &AutoIncrement{}) ||
+		len(pk.Parts) == 1 && pk.Parts[0].C != nil && sqlx.Has(pk.Parts[0].C.Attrs, &AutoIncrement{})
+}
+
+// Build instantiates a new builder and writes the given phrases to it.
+func (s *state) Build(phrases ...string) *sqlx.Builder {
+	b := &sqlx.Builder{QuoteChar: '`', Schema: s.SchemaQualifier, Indent: s.Indent}
+	return b.P(phrases...)
+}
+
+func defaultValue(c *schema.Column) (string, error) {
+	switch x := c.Default.(type) {
+	case *schema.Literal:
+		switch c.Type.Type.(type) {
+		case *schema.BoolType, *schema.DecimalType, *schema.IntegerType, *schema.FloatType:
+			return x.V, nil
+		default:
+			return sqlx.SingleQuote(x.V)
+		}
+	case *schema.RawExpr:
+		return x.X, nil
+	default:
+		return "", fmt.Errorf("unexpected default value type: %T", x)
+	}
+}
+
+func identComma(c []string) string {
+	b := &sqlx.Builder{QuoteChar: '`'}
+	b.MapComma(c, func(i int, b *sqlx.Builder) {
+		if strings.ContainsRune(c[i], '`') {
+			b.WriteString(c[i])
+		} else {
+			b.Ident(c[i])
+		}
+	})
+	return b.String()
+}
diff --git a/vendor/ariga.io/atlas/sql/sqlite/sqlspec.go b/vendor/ariga.io/atlas/sql/sqlite/sqlspec.go
new file mode 100644
index 00000000..4658168a
--- /dev/null
+++ b/vendor/ariga.io/atlas/sql/sqlite/sqlspec.go
@@ -0,0 +1,233 @@
+// Copyright 2021-present The Atlas Authors. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sqlite
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"ariga.io/atlas/schemahcl"
+	"ariga.io/atlas/sql/internal/specutil"
+	"ariga.io/atlas/sql/internal/sqlx"
+	"ariga.io/atlas/sql/schema"
+	"ariga.io/atlas/sql/sqlspec"
+
+	"github.com/hashicorp/hcl/v2/hclparse"
+	"github.com/zclconf/go-cty/cty"
+)
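+
+// exampleHCL is an illustrative sketch (not part of the upstream source) of
+// round-tripping a small document through EvalHCLBytes and MarshalHCL, both
+// declared later in this file. The HCL body below is hypothetical.
+func exampleHCL() error {
+	var s schema.Schema
+	hcl := []byte(`
+schema "main" {}
+table "users" {
+  schema = schema.main
+  column "id" {
+    type = integer
+  }
+}
+`)
+	if err := EvalHCLBytes(hcl, &s, nil); err != nil {
+		return err
+	}
+	out, err := MarshalHCL(&s)
+	if err != nil {
+		return err
+	}
+	fmt.Println(string(out))
+	return nil
+}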
+
+// evalSpec evaluates an Atlas DDL document into v, using the given input values.
+func evalSpec(p *hclparse.Parser, v any, input map[string]cty.Value) error {
+	switch v := v.(type) {
+	case *schema.Realm:
+		var d doc
+		if err := hclState.Eval(p, &d, input); err != nil {
+			return err
+		}
+		err := specutil.Scan(v, d.Schemas, d.Tables, convertTable)
+		if err != nil {
+			return fmt.Errorf("specutil: failed converting to *schema.Realm: %w", err)
+		}
+	case *schema.Schema:
+		var d doc
+		if err := hclState.Eval(p, &d, input); err != nil {
+			return err
+		}
+		if len(d.Schemas) != 1 {
+			return fmt.Errorf("specutil: expecting document to contain a single schema, got %d", len(d.Schemas))
+		}
+		var r schema.Realm
+		if err := specutil.Scan(&r, d.Schemas, d.Tables, convertTable); err != nil {
+			return err
+		}
+		r.Schemas[0].Realm = nil
+		*v = *r.Schemas[0]
+	case schema.Schema, schema.Realm:
+		return fmt.Errorf("sqlite: Eval expects a pointer: received %[1]T, expected *%[1]T", v)
+	default:
+		return hclState.Eval(p, v, input)
+	}
+	return nil
+}
+
+// MarshalSpec marshals v into an Atlas DDL document using a schemahcl.Marshaler.
+func MarshalSpec(v any, marshaler schemahcl.Marshaler) ([]byte, error) {
+	return specutil.Marshal(v, marshaler, schemaSpec)
+}
+
+// convertTable converts a sqlspec.Table to a schema.Table. Table conversion is done without converting
+// ForeignKeySpecs into ForeignKeys, as the target tables do not necessarily exist in the schema
+// at this point. Instead, the linking is done by the convertSchema function.
+func convertTable(spec *sqlspec.Table, parent *schema.Schema) (*schema.Table, error) {
+	return specutil.Table(spec, parent, convertColumn, specutil.PrimaryKey, convertIndex, specutil.Check)
+}
+
+// convertIndex converts a sqlspec.Index into a schema.Index.
+func convertIndex(spec *sqlspec.Index, t *schema.Table) (*schema.Index, error) {
+	idx, err := specutil.Index(spec, t)
+	if err != nil {
+		return nil, err
+	}
+	if attr, ok := spec.Attr("where"); ok {
+		p, err := attr.String()
+		if err != nil {
+			return nil, err
+		}
+		idx.Attrs = append(idx.Attrs, &IndexPredicate{P: p})
+	}
+	return idx, nil
+}
+
+// convertColumn converts a sqlspec.Column into a schema.Column.
+func convertColumn(spec *sqlspec.Column, _ *schema.Table) (*schema.Column, error) {
+	c, err := specutil.Column(spec, convertColumnType)
+	if err != nil {
+		return nil, err
+	}
+	if attr, ok := spec.Attr("auto_increment"); ok {
+		b, err := attr.Bool()
+		if err != nil {
+			return nil, err
+		}
+		if b {
+			c.AddAttrs(&AutoIncrement{})
+		}
+	}
+	if err := specutil.ConvertGenExpr(spec.Remain(), c, storedOrVirtual); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// convertColumnType converts a sqlspec.Column into a concrete SQLite schema.Type.
+func convertColumnType(spec *sqlspec.Column) (schema.Type, error) {
+	return TypeRegistry.Type(spec.Type, spec.Extra.Attrs)
+}
+
+// schemaSpec converts from a concrete SQLite schema to an Atlas specification.
+func schemaSpec(schem *schema.Schema) (*sqlspec.Schema, []*sqlspec.Table, error) {
+	return specutil.FromSchema(schem, tableSpec)
+}
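+
+// Illustrative note (not part of the upstream source): a partial index can be
+// declared in HCL through the `where` attribute handled by convertIndex and
+// indexSpec. The table and predicate below are hypothetical:
+//
+//	index "users_active" {
+//	  columns = [column.active]
+//	  unique  = true
+//	  where   = "active"
+//	}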
+
+// tableSpec converts from a concrete SQLite schema.Table to a sqlspec.Table.
+func tableSpec(tab *schema.Table) (*sqlspec.Table, error) {
+	return specutil.FromTable(
+		tab,
+		columnSpec,
+		specutil.FromPrimaryKey,
+		indexSpec,
+		specutil.FromForeignKey,
+		specutil.FromCheck,
+	)
+}
+
+func indexSpec(idx *schema.Index) (*sqlspec.Index, error) {
+	spec, err := specutil.FromIndex(idx)
+	if err != nil {
+		return nil, err
+	}
+	if i := (IndexPredicate{}); sqlx.Has(idx.Attrs, &i) && i.P != "" {
+		spec.Extra.Attrs = append(spec.Extra.Attrs, specutil.VarAttr("where", strconv.Quote(i.P)))
+	}
+	return spec, nil
+}
+
+// columnSpec converts from a concrete SQLite schema.Column into a sqlspec.Column.
+func columnSpec(c *schema.Column, _ *schema.Table) (*sqlspec.Column, error) {
+	s, err := specutil.FromColumn(c, columnTypeSpec)
+	if err != nil {
+		return nil, err
+	}
+	if sqlx.Has(c.Attrs, &AutoIncrement{}) {
+		s.Extra.Attrs = append(s.Extra.Attrs, schemahcl.BoolAttr("auto_increment", true))
+	}
+	if x := (schema.GeneratedExpr{}); sqlx.Has(c.Attrs, &x) {
+		s.Extra.Children = append(s.Extra.Children, specutil.FromGenExpr(x, storedOrVirtual))
+	}
+	return s, nil
+}
+
+// columnTypeSpec converts from a concrete SQLite schema.Type into a sqlspec.Column type.
+func columnTypeSpec(t schema.Type) (*sqlspec.Column, error) {
+	st, err := TypeRegistry.Convert(t)
+	if err != nil {
+		return nil, err
+	}
+	return &sqlspec.Column{Type: st}, nil
+}
+
+// TypeRegistry contains the supported TypeSpecs for the sqlite driver.
+var TypeRegistry = schemahcl.NewRegistry(
+	schemahcl.WithFormatter(FormatType),
+	schemahcl.WithParser(ParseType),
+	schemahcl.WithSpecs(
+		schemahcl.NewTypeSpec(TypeReal, schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})),
+		schemahcl.NewTypeSpec(TypeBlob, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeText, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec(TypeInteger, schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("int", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("tinyint", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("smallint", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("mediumint", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("bigint", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.AliasTypeSpec("unsigned_big_int", "unsigned big int", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("int2", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("int8", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("uint64", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("double", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.AliasTypeSpec("double_precision", "double precision", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("float", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("character", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.NewTypeSpec("varchar", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
+		schemahcl.AliasTypeSpec("varying_character", "varying character", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))),
schemahcl.NewTypeSpec("nchar", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.AliasTypeSpec("native_character", "native character", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec("nvarchar", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec("clob", schemahcl.WithAttributes(schemahcl.SizeTypeAttr(false))), + schemahcl.NewTypeSpec("numeric", schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec("decimal", schemahcl.WithAttributes(&schemahcl.TypeAttr{Name: "precision", Kind: reflect.Int, Required: false}, &schemahcl.TypeAttr{Name: "scale", Kind: reflect.Int, Required: false})), + schemahcl.NewTypeSpec("bool"), + schemahcl.NewTypeSpec("boolean"), + schemahcl.NewTypeSpec("date"), + schemahcl.NewTypeSpec("datetime"), + schemahcl.NewTypeSpec("json"), + schemahcl.NewTypeSpec("uuid"), + ), +) + +var ( + hclState = schemahcl.New( + schemahcl.WithTypes("table.column.type", TypeRegistry.Specs()), + schemahcl.WithScopedEnums("table.column.as.type", stored, virtual), + schemahcl.WithScopedEnums("table.foreign_key.on_update", specutil.ReferenceVars...), + schemahcl.WithScopedEnums("table.foreign_key.on_delete", specutil.ReferenceVars...), + ) + // MarshalHCL marshals v into an Atlas HCL DDL document. + MarshalHCL = schemahcl.MarshalerFunc(func(v any) ([]byte, error) { + return MarshalSpec(v, hclState) + }) + // EvalHCL implements the schemahcl.Evaluator interface. + EvalHCL = schemahcl.EvalFunc(evalSpec) + + // EvalHCLBytes is a helper that evaluates an HCL document from a byte slice instead + // of from an hclparse.Parser instance. + EvalHCLBytes = specutil.HCLBytesFunc(EvalHCL) +) + +// storedOrVirtual returns a STORED or VIRTUAL +// generated type option based on the given string. +func storedOrVirtual(s string) string { + if s = strings.ToUpper(s); s == "" { + return virtual + } + return s +} + +type doc struct { + Tables []*sqlspec.Table `spec:"table"` + Schemas []*sqlspec.Schema `spec:"schema"` +} diff --git a/vendor/ariga.io/atlas/sql/sqlspec/BUILD b/vendor/ariga.io/atlas/sql/sqlspec/BUILD new file mode 100644 index 00000000..72df5842 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlspec/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlspec", + srcs = ["sqlspec.go"], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/sqlspec", + importpath = "ariga.io/atlas/sql/sqlspec", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/schemahcl", + "//vendor/github.com/zclconf/go-cty/cty", + ], +) diff --git a/vendor/ariga.io/atlas/sql/sqlspec/sqlspec.go b/vendor/ariga.io/atlas/sql/sqlspec/sqlspec.go new file mode 100644 index 00000000..580d4e06 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqlspec/sqlspec.go @@ -0,0 +1,89 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlspec + +import ( + "ariga.io/atlas/schemahcl" + + "github.com/zclconf/go-cty/cty" +) + +type ( + // Schema holds a specification for a Schema. + Schema struct { + Name string `spec:"name,name"` + schemahcl.DefaultExtension + } + + // Table holds a specification for an SQL table. 
+ Table struct { + Name string `spec:",name"` + Qualifier string `spec:",qualifier"` + Schema *schemahcl.Ref `spec:"schema"` + Columns []*Column `spec:"column"` + PrimaryKey *PrimaryKey `spec:"primary_key"` + ForeignKeys []*ForeignKey `spec:"foreign_key"` + Indexes []*Index `spec:"index"` + Checks []*Check `spec:"check"` + schemahcl.DefaultExtension + } + + // Column holds a specification for a column in an SQL table. + Column struct { + Name string `spec:",name"` + Null bool `spec:"null"` + Type *schemahcl.Type `spec:"type"` + Default cty.Value `spec:"default"` + schemahcl.DefaultExtension + } + + // PrimaryKey holds a specification for the primary key of a table. + PrimaryKey struct { + Columns []*schemahcl.Ref `spec:"columns"` + schemahcl.DefaultExtension + } + + // Index holds a specification for the index key of a table. + Index struct { + Name string `spec:",name"` + Unique bool `spec:"unique,omitempty"` + Parts []*IndexPart `spec:"on"` + Columns []*schemahcl.Ref `spec:"columns"` + schemahcl.DefaultExtension + } + + // IndexPart holds a specification for the index key part. + IndexPart struct { + Desc bool `spec:"desc,omitempty"` + Column *schemahcl.Ref `spec:"column"` + Expr string `spec:"expr,omitempty"` + schemahcl.DefaultExtension + } + + // Check holds a specification for a check constraint on a table. + Check struct { + Name string `spec:",name"` + Expr string `spec:"expr"` + schemahcl.DefaultExtension + } + + // ForeignKey holds a specification for the Foreign key of a table. + ForeignKey struct { + Symbol string `spec:",name"` + Columns []*schemahcl.Ref `spec:"columns"` + RefColumns []*schemahcl.Ref `spec:"ref_columns"` + OnUpdate *schemahcl.Ref `spec:"on_update"` + OnDelete *schemahcl.Ref `spec:"on_delete"` + schemahcl.DefaultExtension + } + + // Type represents a database agnostic column type. + Type string +) + +func init() { + schemahcl.Register("table", &Table{}) + schemahcl.Register("schema", &Schema{}) +} diff --git a/vendor/ariga.io/atlas/sql/sqltool/BUILD b/vendor/ariga.io/atlas/sql/sqltool/BUILD new file mode 100644 index 00000000..00e9acce --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqltool/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqltool", + srcs = [ + "doc.go", + "hidden.go", + "hidden_windows.go", + "tool.go", + ], + importmap = "go.resf.org/peridot/vendor/ariga.io/atlas/sql/sqltool", + importpath = "ariga.io/atlas/sql/sqltool", + visibility = ["//visibility:public"], + deps = ["//vendor/ariga.io/atlas/sql/migrate"], +) diff --git a/vendor/ariga.io/atlas/sql/sqltool/doc.go b/vendor/ariga.io/atlas/sql/sqltool/doc.go new file mode 100644 index 00000000..dd055780 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqltool/doc.go @@ -0,0 +1,6 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +// Package sqltool contains logic to integrate existing tools like Flyway or Liquibase with the Atlas CLI. +package sqltool diff --git a/vendor/ariga.io/atlas/sql/sqltool/hidden.go b/vendor/ariga.io/atlas/sql/sqltool/hidden.go new file mode 100644 index 00000000..184e2dee --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqltool/hidden.go @@ -0,0 +1,13 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. 
+ +//go:build !windows + +package sqltool + +import "path/filepath" + +func hidden(path string) (bool, error) { + return filepath.Base(path)[0] == '.', nil +} diff --git a/vendor/ariga.io/atlas/sql/sqltool/hidden_windows.go b/vendor/ariga.io/atlas/sql/sqltool/hidden_windows.go new file mode 100644 index 00000000..f453612b --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqltool/hidden_windows.go @@ -0,0 +1,26 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqltool + +import ( + "path/filepath" + "syscall" +) + +func hidden(path string) (bool, error) { + abs, err := filepath.Abs(path) + if err != nil { + return false, err + } + p, err := syscall.UTF16PtrFromString(abs) + if err != nil { + return false, err + } + attr, err := syscall.GetFileAttributes(p) + if err != nil { + return false, err + } + return attr&syscall.FILE_ATTRIBUTE_HIDDEN != 0, nil +} diff --git a/vendor/ariga.io/atlas/sql/sqltool/tool.go b/vendor/ariga.io/atlas/sql/sqltool/tool.go new file mode 100644 index 00000000..a0768751 --- /dev/null +++ b/vendor/ariga.io/atlas/sql/sqltool/tool.go @@ -0,0 +1,547 @@ +// Copyright 2021-present The Atlas Authors. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqltool + +import ( + "bufio" + "bytes" + "fmt" + "io/fs" + "path/filepath" + "regexp" + "sort" + "strings" + "text/template" + "time" + "unicode" + + "ariga.io/atlas/sql/migrate" +) + +var ( + // GolangMigrateFormatter returns migrate.Formatter compatible with golang-migrate/migrate. + GolangMigrateFormatter = templateFormatter( + "{{ now }}{{ with .Name }}_{{ . }}{{ end }}.up.sql", + `{{ range .Changes }}{{ with .Comment }}-- {{ println . }}{{ end }}{{ printf "%s;\n" .Cmd }}{{ end }}`, + "{{ now }}{{ with .Name }}_{{ . }}{{ end }}.down.sql", + `{{ range $c := rev .Changes }}{{ with $stmts := .ReverseStmts }}{{ with $c.Comment }}-- reverse: {{ println . }}{{ end }}{{ range $stmts }}{{ printf "%s;\n" . }}{{ end }}{{ end }}{{ end }}`, + ) + // GooseFormatter returns migrate.Formatter compatible with pressly/goose. + GooseFormatter = templateFormatter( + "{{ now }}{{ with .Name }}_{{ . }}{{ end }}.sql", + `-- +goose Up +{{ range .Changes }}{{ with .Comment }}-- {{ println . }}{{ end }}{{ printf "%s;\n" .Cmd }}{{ end }} +-- +goose Down +{{ range $c := rev .Changes }}{{ with $stmts := .ReverseStmts }}{{ with $c.Comment }}-- reverse: {{ println . }}{{ end }}{{ range $stmts }}{{ printf "%s;\n" . }}{{ end }}{{ end }}{{ end }}`, + ) + // FlywayFormatter returns migrate.Formatter compatible with Flyway. + FlywayFormatter = templateFormatter( + "V{{ now }}{{ with .Name }}__{{ . }}{{ end }}.sql", + `{{ range .Changes }}{{ with .Comment }}-- {{ println . }}{{ end }}{{ printf "%s;\n" .Cmd }}{{ end }}`, + "U{{ now }}{{ with .Name }}__{{ . }}{{ end }}.sql", + `{{ range $c := rev .Changes }}{{ with $stmts := .ReverseStmts }}{{ with $c.Comment }}-- reverse: {{ println . }}{{ end }}{{ range $stmts }}{{ printf "%s;\n" . }}{{ end }}{{ end }}{{ end }}`, + ) + // LiquibaseFormatter returns migrate.Formatter compatible with Liquibase. + LiquibaseFormatter = templateFormatter( + "{{ now }}{{ with .Name }}_{{ . 
}}{{ end }}.sql",
+		`{{- $now := now -}}
+--liquibase formatted sql
+
+{{- range $index, $change := .Changes }}
+--changeset atlas:{{ $now }}-{{ inc $index }}
+{{ with $change.Comment }}--comment: {{ . }}{{ end }}
+{{ $change.Cmd }};
+{{ with $stmts := .ReverseStmts }}{{ range $stmts }}{{ printf "--rollback: %s;\n" . }}{{ end }}{{ end }}
+{{- end }}`,
+	)
+	// DBMateFormatter returns migrate.Formatter compatible with amacneil/dbmate.
+	DBMateFormatter = templateFormatter(
+		"{{ now }}{{ with .Name }}_{{ . }}{{ end }}.sql",
+		`-- migrate:up
+{{ range .Changes }}{{ with .Comment }}-- {{ println . }}{{ end }}{{ printf "%s;\n" .Cmd }}{{ end }}
+-- migrate:down
+{{ range $c := rev .Changes }}{{ with $stmts := .ReverseStmts }}{{ with $c.Comment }}-- reverse: {{ println . }}{{ end }}{{ range $stmts }}{{ printf "%s;\n" . }}{{ end }}{{ end }}{{ end }}`,
+	)
+	// DbmateFormatter is the same as DBMateFormatter.
+	// Deprecated: Use DBMateFormatter instead.
+	DbmateFormatter = DBMateFormatter
+)
+
+type (
+	// GolangMigrateDir wraps migrate.LocalDir and provides a migrate.Scanner implementation able to understand files
+	// generated by the GolangMigrateFormatter for migration directory replaying.
+	GolangMigrateDir struct{ *migrate.LocalDir }
+	// GolangMigrateFile wraps migrate.LocalFile with custom description function.
+	GolangMigrateFile struct{ *migrate.LocalFile }
+)
+
+// NewGolangMigrateDir returns a new GolangMigrateDir.
+func NewGolangMigrateDir(path string) (*GolangMigrateDir, error) {
+	dir, err := migrate.NewLocalDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return &GolangMigrateDir{dir}, nil
+}
+
+// Files implements Scanner.Files. It looks for all files with the up.sql suffix and orders them by filename.
+func (d *GolangMigrateDir) Files() ([]migrate.File, error) {
+	names, err := fs.Glob(d, "*.up.sql")
+	if err != nil {
+		return nil, err
+	}
+	// Sort files lexicographically.
+	sort.Slice(names, func(i, j int) bool {
+		return names[i] < names[j]
+	})
+	ret := make([]migrate.File, len(names))
+	for i, n := range names {
+		b, err := fs.ReadFile(d, n)
+		if err != nil {
+			return nil, fmt.Errorf("sql/migrate: read file %q: %w", n, err)
+		}
+		ret[i] = &GolangMigrateFile{LocalFile: migrate.NewLocalFile(n, b)}
+	}
+	return ret, nil
+}
+
+// Desc implements File.Desc.
+func (f *GolangMigrateFile) Desc() string {
+	return strings.TrimSuffix(f.LocalFile.Desc(), ".up")
+}
+
+type (
+	// GooseDir wraps migrate.LocalDir and provides a migrate.Scanner implementation able to understand files
+	// generated by the GooseFormatter for migration directory replaying.
+	GooseDir struct{ *migrate.LocalDir }
+	// GooseFile wraps migrate.LocalFile with custom statements function.
+	GooseFile struct{ *migrate.LocalFile }
+)
+
+// NewGooseDir returns a new GooseDir.
+func NewGooseDir(path string) (*GooseDir, error) {
+	dir, err := migrate.NewLocalDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return &GooseDir{dir}, nil
+}
+
+// Files looks for all files with the .sql suffix and orders them by filename.
+func (d *GooseDir) Files() ([]migrate.File, error) {
+	files, err := d.LocalDir.Files()
+	if err != nil {
+		return nil, err
+	}
+	for i, f := range files {
+		files[i] = &GooseFile{f.(*migrate.LocalFile)}
+	}
+	return files, nil
+}
+
+// StmtDecls understands the migration format used by pressly/goose sql migration files.
+func (f *GooseFile) StmtDecls() ([]*migrate.Stmt, error) {
+	// An Atlas custom delimiter is defined per file, while goose uses pragmas to mark the start and end of a statement.
+ // In order to use the Atlas lexer, we define a custom delimiter for the source SQL and edit it to use the + // custom delimiter. + const delim = "-- ATLAS_DELIM_END" + var ( + state, lineCount int + lines = []string{"-- atlas:delimiter " + delim, ""} + sc = bufio.NewScanner(bytes.NewReader(f.Bytes())) + ) +Scan: + for sc.Scan() { + lineCount++ + line := sc.Text() + // Handle goose custom delimiters. + if strings.HasPrefix(line, goosePragma) { + switch strings.TrimSpace(strings.TrimPrefix(line, goosePragma)) { + case "Up": + switch state { + case none: // found the "up" part of the file + state = up + default: + return nil, unexpectedPragmaErr(f, lineCount, "Up") + } + case "Down": + switch state { + case up: // found the "down" part + break Scan + default: + return nil, unexpectedPragmaErr(f, lineCount, "Down") + } + case "StatementBegin": + switch state { + case up: + state = begin // begin of a statement + default: + return nil, unexpectedPragmaErr(f, lineCount, "StatementBegin") + } + case "StatementEnd": + switch state { + case begin: + state = end // end of a statement + default: + return nil, unexpectedPragmaErr(f, lineCount, "StatementEnd") + } + } + } + // Write the line of the statement. + if !reGoosePragma.MatchString(line) && state != end { + // end of statement if line ends with semicolon + line = strings.TrimRightFunc(line, unicode.IsSpace) + lines = append(lines, line) + if state == up && strings.HasSuffix(line, ";") && !strings.HasPrefix(line, "--") { + lines = append(lines, delim) + } + } + if state == end { + state = up + lines = append(lines, delim) + } + } + return migrate.Stmts(strings.Join(lines, "\n")) +} + +// Stmts understands the migration format used by pressly/goose sql migration files. +func (f *GooseFile) Stmts() ([]string, error) { + s, err := f.StmtDecls() + if err != nil { + return nil, err + } + stmts := make([]string, len(s)) + for i := range s { + stmts[i] = s[i].Text + } + return stmts, nil +} + +type ( + // DBMateDir wraps migrate.LocalDir and provides a migrate.Scanner implementation able to understand files + // generated by the DBMateFormatter for migration directory replaying. + DBMateDir struct{ *migrate.LocalDir } + // DBMateFile wraps migrate.LocalFile with custom statements function. + DBMateFile struct{ *migrate.LocalFile } +) + +// NewDBMateDir returns a new DBMateDir. +func NewDBMateDir(path string) (*DBMateDir, error) { + dir, err := migrate.NewLocalDir(path) + if err != nil { + return nil, err + } + return &DBMateDir{dir}, nil +} + +// Files looks for all files with up.sql suffix and orders them by filename. +func (d *DBMateDir) Files() ([]migrate.File, error) { + files, err := d.LocalDir.Files() + if err != nil { + return nil, err + } + for i, f := range files { + files[i] = &DBMateFile{f.(*migrate.LocalFile)} + } + return files, nil +} + +// StmtDecls understands the migration format used by amacneil/dbmate sql migration files. +func (f *DBMateFile) StmtDecls() ([]*migrate.Stmt, error) { + var ( + state, lineCount int + lines []string + sc = bufio.NewScanner(bytes.NewReader(f.Bytes())) + ) +Scan: + for sc.Scan() { + lineCount++ + line := sc.Text() + // Handle pragmas. + if strings.HasPrefix(line, dbmatePragma) { + switch strings.TrimSpace(strings.TrimPrefix(line, dbmatePragma)) { + case "up": + state = up + case "down": + break Scan + } + } + // Write the line of the statement. 
+		if !reDBMatePragma.MatchString(line) && state == up {
+			lines = append(lines, line)
+		}
+	}
+	return migrate.Stmts(strings.Join(lines, "\n"))
+}
+
+// Stmts understands the migration format used by amacneil/dbmate sql migration files.
+func (f *DBMateFile) Stmts() ([]string, error) {
+	s, err := f.StmtDecls()
+	if err != nil {
+		return nil, err
+	}
+	stmts := make([]string, len(s))
+	for i := range s {
+		stmts[i] = s[i].Text
+	}
+	return stmts, nil
+}
+
+type (
+	// FlywayDir wraps migrate.LocalDir and provides a migrate.Scanner implementation able to understand files
+	// generated by the FlywayFormatter for migration directory replaying.
+	FlywayDir struct{ *migrate.LocalDir }
+	// FlywayFile wraps migrate.LocalFile with custom statements function.
+	FlywayFile struct{ *migrate.LocalFile }
+)
+
+// NewFlywayDir returns a new FlywayDir.
+func NewFlywayDir(path string) (*FlywayDir, error) {
+	dir, err := migrate.NewLocalDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return &FlywayDir{dir}, nil
+}
+
+// Files implements Scanner.Files. It looks for all files with the .sql suffix. The given directory is recursively scanned
+// for non-hidden subdirectories. All found files will be ordered by migration type (Baseline, Versioned, Repeatable)
+// and filename.
+func (d *FlywayDir) Files() ([]migrate.File, error) {
+	var ff flywayFiles
+	if err := fs.WalkDir(d, "", func(path string, e fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if path != "" && e.IsDir() {
+			h, err := hidden(filepath.Join(d.Path(), path))
+			if err != nil {
+				return err
+			}
+			if h {
+				return fs.SkipDir
+			}
+			return nil
+		}
+		var (
+			pfx  = e.Name()[0]
+			base = filepath.Base(e.Name())
+			ext  = filepath.Ext(e.Name())
+		)
+		if ext != ".sql" || len(base) < 4 || (pfx != 'V' && pfx != 'B' && pfx != 'R') {
+			return nil
+		}
+		return ff.add(path)
+	}); err != nil {
+		return nil, err
+	}
+	var (
+		names = ff.names()
+		ret   = make([]migrate.File, len(names))
+	)
+	for i, n := range names {
+		b, err := fs.ReadFile(d, n)
+		if err != nil {
+			return nil, fmt.Errorf("sql/migrate: read file %q: %w", n, err)
+		}
+		ret[i] = &FlywayFile{migrate.NewLocalFile(n, b)}
+	}
+	return ret, nil
+}
+
+// Desc implements File.Desc.
+func (f FlywayFile) Desc() string {
+	return flywayDesc(f.Name())
+}
+
+// Version implements File.Version.
+func (f FlywayFile) Version() string {
+	return flywayVersion(f.Name())
+}
+
+// SetRepeatableVersion iterates over the migration files and assigns repeatable migrations a version number since
+// Atlas does not have the concept of repeatable migrations. Each repeatable migration file gets assigned the version
+// of the preceding migration file (or 0) followed by an 'R'.
+func SetRepeatableVersion(ff []migrate.File) {
+	// First find the index of the first repeatable migration file (if any).
+	var (
+		v   string // last versioned migration version
+		idx = func() int {
+			for i, f := range ff {
+				if f.Version() == "" {
+					return i
+				}
+			}
+			return -1
+		}()
+	)
+	switch idx {
+	case -1:
+		// No repeatable migration exists.
+		return
+	case 0:
+		// There is no preceding migration. Use Version "0".
+		v = "0"
+	default:
+		v = ff[idx-1].Version()
+	}
+	if v != "" {
+		// Every migration file following the first repeatable one is repeatable as well.
+ for i, f := range ff[idx:] { + ff[idx+i] = &FlywayFile{migrate.NewLocalFile( + fmt.Sprintf("V%sR__%s", v, f.Desc()), + f.Bytes(), + )} + } + } +} + +// LiquibaseDir wraps migrate.LocalDir and provides a migrate.Scanner implementation able to understand files +// generated by the LiquibaseFormatter for migration directory replaying. +type LiquibaseDir struct{ *migrate.LocalDir } + +// NewLiquibaseDir returns a new LiquibaseDir. +func NewLiquibaseDir(path string) (*LiquibaseDir, error) { + d, err := migrate.NewLocalDir(path) + if err != nil { + return nil, err + } + return &LiquibaseDir{d}, nil +} + +const ( + none int = iota + up + begin + end + goosePragma = "-- +goose" + dbmatePragma = "-- migrate:" +) + +var ( + reGoosePragma = regexp.MustCompile(regexp.QuoteMeta(goosePragma) + " Up|Down|StatementBegin|StatementEnd") + reDBMatePragma = regexp.MustCompile(dbmatePragma + "up|down") +) + +// flywayFiles retrieves flyway migration files by calls to add(). It will only keep the latest baseline and ignore +// all versioned files that are included in that baseline. +type flywayFiles struct { + baseline string + versioned []string + repeatable []string +} + +// add the given path to the migration files according to its type. The input directory is assumed to be valid +// according to the Flyway documentation (no duplicate versions, etc.). +func (ff *flywayFiles) add(path string) error { + switch p := filepath.Base(path)[0]; p { + case 'B': + if ff.baseline != "" && flywayVersion(path) < flywayVersion(ff.baseline) { + return nil + } + ff.baseline = path + // In case we set a new baseline, remove all versioned files with a version smaller than the new baseline. + var ( + bv = flywayVersion(ff.baseline) + vs []string + ) + for _, v := range ff.versioned { + if v > bv { + vs = append(vs, v) + } + } + ff.versioned = vs + return nil + case 'V': + v := flywayVersion(path) + if ff.baseline == "" || flywayVersion(ff.baseline) < v { + ff.versioned = append(ff.versioned, path) + } + return nil + case 'R': + ff.repeatable = append(ff.repeatable, path) + return nil + default: + return fmt.Errorf("sql/sqltool: unexpected Flyway prefix %q", p) + } +} + +func (ff *flywayFiles) names() []string { + var names []string + if ff.baseline != "" { + names = append(names, ff.baseline) + } + sort.Strings(ff.versioned) + sort.Strings(ff.repeatable) + names = append(names, ff.versioned...) + names = append(names, ff.repeatable...) + return names +} + +func flywayDesc(path string) string { + parts := strings.SplitN(path, "__", 2) + if len(parts) == 1 { + return "" + } + return strings.TrimSuffix(parts[1], ".sql") +} + +func flywayVersion(path string) string { + // Repeatable migrations don't have a version. + if filepath.Base(path)[0] == 'R' { + return "" + } + return strings.SplitN(strings.TrimSuffix(filepath.Base(path), ".sql"), "__", 2)[0][1:] +} + +func unexpectedPragmaErr(f migrate.File, line int, pragma string) error { + var tool string + switch f := f.(type) { + case *GooseFile: + tool = "goose" + case *DBMateFile: + tool = "dbmate" + default: + return fmt.Errorf("sql/migrate: unexpected migration file type '%T'", f) + } + return fmt.Errorf( + "sql/migrate: %s: %s:%d unexpected goosePragma '%s'", + tool, f.Name(), line, pragma, + ) +} + +// funcs contains the template.FuncMap for the different formatters. +var funcs = template.FuncMap{ + "inc": func(x int) int { return x + 1 }, + // now formats the current time in a lexicographically ascending order while maintaining human readability. 
+ "now": func() string { return time.Now().UTC().Format("20060102150405") }, + "rev": reverse, +} + +// templateFormatter parses the given templates and passes them on to the migrate.NewTemplateFormatter. +func templateFormatter(templates ...string) migrate.Formatter { + tpls := make([]*template.Template, len(templates)) + for i, t := range templates { + tpls[i] = template.Must(template.New("").Funcs(funcs).Parse(t)) + } + tf, err := migrate.NewTemplateFormatter(tpls...) + if err != nil { + panic(err) + } + return tf +} + +// reverse changes for the down migration. +func reverse(changes []*migrate.Change) []*migrate.Change { + n := len(changes) + rev := make([]*migrate.Change, n) + if n%2 == 1 { + rev[n/2] = changes[n/2] + } + for i, j := 0, n-1; i < j; i, j = i+1, j-1 { + rev[i], rev[j] = changes[j], changes[i] + } + return rev +} diff --git a/vendor/entgo.io/ent/.all-contributorsrc b/vendor/entgo.io/ent/.all-contributorsrc new file mode 100644 index 00000000..66692e11 --- /dev/null +++ b/vendor/entgo.io/ent/.all-contributorsrc @@ -0,0 +1,855 @@ +{ + "files": [ + "doc/md/contributors.md" + ], + "imageSize": 100, + "commit": false, + "contributors": [ + { + "login": "a8m", + "name": "Ariel Mashraki", + "avatar_url": "https://avatars.githubusercontent.com/u/7413593?v=4", + "profile": "https://github.com/a8m", + "contributions": [ + "maintenance", + "doc", + "code" + ] + }, + { + "login": "alexsn", + "name": "Alex Snast", + "avatar_url": "https://avatars.githubusercontent.com/u/987019?v=4", + "profile": "https://github.com/alexsn", + "contributions": [ + "code" + ] + }, + { + "login": "rotemtam", + "name": "Rotem Tamir", + "avatar_url": "https://avatars.githubusercontent.com/u/1522681?v=4", + "profile": "https://rotemtam.com/", + "contributions": [ + "maintenance", + "doc", + "code" + ] + }, + { + "login": "cliedeman", + "name": "Ciaran Liedeman", + "avatar_url": "https://avatars.githubusercontent.com/u/3578740?v=4", + "profile": "https://github.com/cliedeman", + "contributions": [ + "code" + ] + }, + { + "login": "marwan-at-work", + "name": "Marwan Sulaiman", + "avatar_url": "https://avatars.githubusercontent.com/u/16294261?v=4", + "profile": "https://www.marwan.io/", + "contributions": [ + "code" + ] + }, + { + "login": "napei", + "name": "Nathaniel Peiffer", + "avatar_url": "https://avatars.githubusercontent.com/u/8946502?v=4", + "profile": "https://nathaniel.peiffer.com.au/", + "contributions": [ + "code" + ] + }, + { + "login": "tmc", + "name": "Travis Cline", + "avatar_url": "https://avatars.githubusercontent.com/u/3977?v=4", + "profile": "https://github.com/tmc", + "contributions": [ + "code" + ] + }, + { + "login": "hantmac", + "name": "Jeremy", + "avatar_url": "https://avatars.githubusercontent.com/u/7600925?v=4", + "profile": "https://cloudsjhan.github.io/", + "contributions": [ + "code" + ] + }, + { + "login": "aca", + "name": "aca", + "avatar_url": "https://avatars.githubusercontent.com/u/50316549?v=4", + "profile": "https://github.com/aca", + "contributions": [ + "code" + ] + }, + { + "login": "BrentChesny", + "name": "BrentChesny", + "avatar_url": "https://avatars.githubusercontent.com/u/1449435?v=4", + "profile": "https://github.com/BrentChesny", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "giautm", + "name": "Giau. 
Tran Minh", + "avatar_url": "https://avatars.githubusercontent.com/u/12751435?v=4", + "profile": "https://github.com/giautm", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "htdvisser", + "name": "Hylke Visser", + "avatar_url": "https://avatars.githubusercontent.com/u/181308?v=4", + "profile": "https://htdvisser.dev/", + "contributions": [ + "code" + ] + }, + { + "login": "kerbelp", + "name": "Pavel Kerbel", + "avatar_url": "https://avatars.githubusercontent.com/u/3934990?v=4", + "profile": "https://github.com/kerbelp", + "contributions": [ + "code" + ] + }, + { + "login": "day-dreams", + "name": "zhangnan", + "avatar_url": "https://avatars.githubusercontent.com/u/24593904?v=4", + "profile": "https://github.com/day-dreams", + "contributions": [ + "code" + ] + }, + { + "login": "uta-mori", + "name": "mori yuta", + "avatar_url": "https://avatars.githubusercontent.com/u/59682979?v=4", + "profile": "https://github.com/uta-mori", + "contributions": [ + "code", + "translation", + "review" + ] + }, + { + "login": "chris-rock", + "name": "Christoph Hartmann", + "avatar_url": "https://avatars.githubusercontent.com/u/1178413?v=4", + "profile": "http://lollyrock.com/", + "contributions": [ + "code" + ] + }, + { + "login": "rubensayshi", + "name": "Ruben de Vries", + "avatar_url": "https://avatars.githubusercontent.com/u/649160?v=4", + "profile": "https://github.com/rubensayshi", + "contributions": [ + "code" + ] + }, + { + "login": "ernado", + "name": "Aleksandr Razumov", + "avatar_url": "https://avatars.githubusercontent.com/u/866677?v=4", + "profile": "https://keybase.io/ernado", + "contributions": [ + "code" + ] + }, + { + "login": "apbuteau", + "name": "apbuteau", + "avatar_url": "https://avatars.githubusercontent.com/u/6796073?v=4", + "profile": "https://github.com/apbuteau", + "contributions": [ + "code" + ] + }, + { + "login": "ichord", + "name": "Harold.Luo", + "avatar_url": "https://avatars.githubusercontent.com/u/1324791?v=4", + "profile": "https://github.com/ichord", + "contributions": [ + "code" + ] + }, + { + "login": "idoshveki", + "name": "ido shveki", + "avatar_url": "https://avatars.githubusercontent.com/u/11615669?v=4", + "profile": "https://github.com/idoshveki", + "contributions": [ + "code" + ] + }, + { + "login": "masseelch", + "name": "MasseElch", + "avatar_url": "https://avatars.githubusercontent.com/u/12862103?v=4", + "profile": "https://github.com/masseelch", + "contributions": [ + "code" + ] + }, + { + "login": "kidlj", + "name": "Jian Li", + "avatar_url": "https://avatars.githubusercontent.com/u/300616?v=4", + "profile": "https://github.com/kidlj", + "contributions": [ + "code" + ] + }, + { + "login": "nolotz", + "name": "Noah-Jerome Lotzer", + "avatar_url": "https://avatars.githubusercontent.com/u/5778728?v=4", + "profile": "https://noah.je/", + "contributions": [ + "code" + ] + }, + { + "login": "danf0rth", + "name": "danforth", + "avatar_url": "https://avatars.githubusercontent.com/u/14220891?v=4", + "profile": "https://github.com/danf0rth", + "contributions": [ + "code" + ] + }, + { + "login": "maxiloEmmmm", + "name": "maxilozoz", + "avatar_url": "https://avatars.githubusercontent.com/u/16779121?v=4", + "profile": "https://github.com/maxiloEmmmm", + "contributions": [ + "code" + ] + }, + { + "login": "zzwx", + "name": "zzwx", + "avatar_url": "https://avatars.githubusercontent.com/u/8169082?v=4", + "profile": "https://gist.github.com/zzwx", + "contributions": [ + "code" + ] + }, + { + "login": "ix64", + "name": "MengYX", + "avatar_url": 
"https://avatars.githubusercontent.com/u/13902388?v=4", + "profile": "https://github.com/ix64", + "contributions": [ + "translation" + ] + }, + { + "login": "mattn", + "name": "mattn", + "avatar_url": "https://avatars.githubusercontent.com/u/10111?v=4", + "profile": "https://mattn.kaoriya.net/", + "contributions": [ + "translation" + ] + }, + { + "login": "Bladrak", + "name": "Hugo Briand", + "avatar_url": "https://avatars.githubusercontent.com/u/1321977?v=4", + "profile": "https://github.com/Bladrak", + "contributions": [ + "code" + ] + }, + { + "login": "enmand", + "name": "Dan Enman", + "avatar_url": "https://avatars.githubusercontent.com/u/432487?v=4", + "profile": "https://danielenman.com/", + "contributions": [ + "code" + ] + }, + { + "login": "UnAfraid", + "name": "Rumen Nikiforov", + "avatar_url": "https://avatars.githubusercontent.com/u/2185291?v=4", + "profile": "http://www.l2junity.org/", + "contributions": [ + "code" + ] + }, + { + "login": "wenerme", + "name": "陈杨文", + "avatar_url": "https://avatars.githubusercontent.com/u/1777211?v=4", + "profile": "https://wener.me", + "contributions": [ + "code" + ] + }, + { + "login": "joesonw", + "name": "Qiaosen (Joeson) Huang", + "avatar_url": "https://avatars.githubusercontent.com/u/1635441?v=4", + "profile": "https://djwong.net", + "contributions": [ + "bug" + ] + }, + { + "login": "davebehr1", + "name": "AlonDavidBehr", + "avatar_url": "https://avatars.githubusercontent.com/u/16716239?v=4", + "profile": "https://github.com/davebehr1", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "DuGlaser", + "name": "DuGlaser", + "avatar_url": "https://avatars.githubusercontent.com/u/50506482?v=4", + "profile": "http://duglaser.dev", + "contributions": [ + "doc" + ] + }, + { + "login": "shanna", + "name": "Shane Hanna", + "avatar_url": "https://avatars.githubusercontent.com/u/28489?v=4", + "profile": "https://github.com/shanna", + "contributions": [ + "doc" + ] + }, + { + "login": "mahmud2011", + "name": "Mahmudul Haque", + "avatar_url": "https://avatars.githubusercontent.com/u/5278142?v=4", + "profile": "https://www.linkedin.com/in/mahmud2011", + "contributions": [ + "code" + ] + }, + { + "login": "sywesk", + "name": "Benjamin Bourgeais", + "avatar_url": "https://avatars.githubusercontent.com/u/862607?v=4", + "profile": "http://blog.scaleprocess.net", + "contributions": [ + "code" + ] + }, + { + "login": "8ayac", + "name": "8ayac(Yoshinori Hayashi)", + "avatar_url": "https://avatars.githubusercontent.com/u/29266382?v=4", + "profile": "https://about.8ay.ac/", + "contributions": [ + "doc" + ] + }, + { + "login": "y-yagi", + "name": "y-yagi", + "avatar_url": "https://avatars.githubusercontent.com/u/987638?v=4", + "profile": "https://github.com/y-yagi", + "contributions": [ + "doc" + ] + }, + { + "login": "Sacro", + "name": "Ben Woodward", + "avatar_url": "https://avatars.githubusercontent.com/u/2659869?v=4", + "profile": "https://github.com/Sacro", + "contributions": [ + "code" + ] + }, + { + "login": "wzyjerry", + "name": "WzyJerry", + "avatar_url": "https://avatars.githubusercontent.com/u/11435169?v=4", + "profile": "https://github.com/wzyjerry", + "contributions": [ + "code" + ] + }, + { + "login": "tarrencev", + "name": "Tarrence van As", + "avatar_url": "https://avatars.githubusercontent.com/u/4740651?v=4", + "profile": "https://github.com/tarrencev", + "contributions": [ + "doc", + "code" + ] + }, + { + "login": "MONAKA0721", + "name": "Yuya Sumie", + "avatar_url": "https://avatars.githubusercontent.com/u/32859963?v=4", + 
"profile": "https://mo7ka.com", + "contributions": [ + "doc" + ] + }, + { + "login": "akfaew", + "name": "Michal Mazurek", + "avatar_url": "https://avatars.githubusercontent.com/u/7853732?v=4", + "profile": "http://jasminek.net", + "contributions": [ + "code" + ] + }, + { + "login": "nmemoto", + "name": "Takafumi Umemoto", + "avatar_url": "https://avatars.githubusercontent.com/u/1522332?v=4", + "profile": "https://github.com/nmemoto", + "contributions": [ + "doc" + ] + }, + { + "login": "squarebat", + "name": "Khadija Sidhpuri", + "avatar_url": "https://avatars.githubusercontent.com/u/59063821?v=4", + "profile": "http://www.linkedin.com/in/khadija-sidhpuri-87709316a", + "contributions": [ + "code" + ] + }, + { + "login": "neel229", + "name": "Neel Modi", + "avatar_url": "https://avatars.githubusercontent.com/u/53475167?v=4", + "profile": "https://github.com/neel229", + "contributions": [ + "code" + ] + }, + { + "login": "shomodj", + "name": "Boris Shomodjvarac", + "avatar_url": "https://avatars.githubusercontent.com/u/304768?v=4", + "profile": "https://ie.linkedin.com/in/boris-shomodjvarac-51970879", + "contributions": [ + "doc" + ] + }, + { + "login": "sadmansakib", + "name": "Sadman Sakib", + "avatar_url": "https://avatars.githubusercontent.com/u/17023844?v=4", + "profile": "https://github.com/sadmansakib", + "contributions": [ + "doc" + ] + }, + { + "login": "dakimura", + "name": "dakimura", + "avatar_url": "https://avatars.githubusercontent.com/u/34202807?v=4", + "profile": "https://github.com/dakimura", + "contributions": [ + "code" + ] + }, + { + "login": "RiskyFeryansyahP", + "name": "Risky Feryansyah", + "avatar_url": "https://avatars.githubusercontent.com/u/36788585?v=4", + "profile": "https://github.com/RiskyFeryansyahP", + "contributions": [ + "code" + ] + }, + { + "login": "seiichi1101", + "name": "seiichi ", + "avatar_url": "https://avatars.githubusercontent.com/u/20941952?v=4", + "profile": "https://github.com/seiichi1101", + "contributions": [ + "code" + ] + }, + { + "login": "odeke-em", + "name": "Emmanuel T Odeke", + "avatar_url": "https://avatars.githubusercontent.com/u/4898263?v=4", + "profile": "https://orijtech.com/", + "contributions": [ + "code" + ] + }, + { + "login": "isoppp", + "name": "Hiroki Isogai", + "avatar_url": "https://avatars.githubusercontent.com/u/16318727?v=4", + "profile": "https://isoppp.com", + "contributions": [ + "doc" + ] + }, + { + "login": "tsingsun", + "name": "李清山", + "avatar_url": "https://avatars.githubusercontent.com/u/5848549?v=4", + "profile": "https://github.com/tsingsun", + "contributions": [ + "code" + ] + }, + { + "login": "s-takehana", + "name": "s-takehana", + "avatar_url": "https://avatars.githubusercontent.com/u/3423547?v=4", + "profile": "https://github.com/s-takehana", + "contributions": [ + "doc" + ] + }, + { + "login": "EndlessIdea", + "name": "Kuiba", + "avatar_url": "https://avatars.githubusercontent.com/u/1527796?v=4", + "profile": "https://github.com/EndlessIdea", + "contributions": [ + "code" + ] + }, + { + "login": "storyicon", + "name": "storyicon", + "avatar_url": "https://avatars.githubusercontent.com/u/29772821?v=4", + "profile": "https://github.com/storyicon", + "contributions": [ + "code" + ] + }, + { + "login": "evanlurvey", + "name": "Evan Lurvey", + "avatar_url": "https://avatars.githubusercontent.com/u/54965655?v=4", + "profile": "https://github.com/evanlurvey", + "contributions": [ + "code" + ] + }, + { + "login": "attackordie", + "name": "Brian", + "avatar_url": 
"https://avatars.githubusercontent.com/u/20145334?v=4", + "profile": "https://github.com/attackordie", + "contributions": [ + "doc" + ] + }, + { + "login": "ThinkontrolSY", + "name": "Shen Yang", + "avatar_url": "https://avatars.githubusercontent.com/u/11331554?v=4", + "profile": "http://www.thinkontrol.com", + "contributions": [ + "code" + ] + }, + { + "login": "sivchari", + "name": "sivchari", + "avatar_url": "https://avatars.githubusercontent.com/u/55221074?v=4", + "profile": "https://twitter.com/sivchari", + "contributions": [ + "code" + ] + }, + { + "login": "mookjp", + "name": "mook", + "avatar_url": "https://avatars.githubusercontent.com/u/1519309?v=4", + "profile": "https://blog.mookjp.io", + "contributions": [ + "code" + ] + }, + { + "login": "heliumbrain", + "name": "heliumbrain", + "avatar_url": "https://avatars.githubusercontent.com/u/1607668?v=4", + "profile": "http://www.entiros.se", + "contributions": [ + "doc" + ] + }, + { + "login": "JeremyV2014", + "name": "Jeremy Maxey-Vesperman", + "avatar_url": "https://avatars.githubusercontent.com/u/9276415?v=4", + "profile": "https://github.com/JeremyV2014", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "tankbusta", + "name": "Christopher Schmitt", + "avatar_url": "https://avatars.githubusercontent.com/u/592749?v=4", + "profile": "https://github.com/tankbusta", + "contributions": [ + "doc" + ] + }, + { + "login": "grevych", + "name": "Gerardo Reyes", + "avatar_url": "https://avatars.githubusercontent.com/u/3792003?v=4", + "profile": "https://github.com/grevych", + "contributions": [ + "code" + ] + }, + { + "login": "naormatania", + "name": "Naor Matania", + "avatar_url": "https://avatars.githubusercontent.com/u/6978437?v=4", + "profile": "https://github.com/naormatania", + "contributions": [ + "code" + ] + }, + { + "login": "idc77", + "name": "idc77", + "avatar_url": "https://avatars.githubusercontent.com/u/87644834?v=4", + "profile": "https://github.com/idc77", + "contributions": [ + "doc" + ] + }, + { + "login": "HurSungYun", + "name": "Sungyun Hur", + "avatar_url": "https://avatars.githubusercontent.com/u/8033896?v=4", + "profile": "http://ethanhur.me", + "contributions": [ + "doc" + ] + }, + { + "login": "peanut-cc", + "name": "peanut-pg", + "avatar_url": "https://avatars.githubusercontent.com/u/55480838?v=4", + "profile": "https://github.com/peanut-cc", + "contributions": [ + "doc" + ] + }, + { + "login": "m3hm3t", + "name": "Mehmet Yılmaz", + "avatar_url": "https://avatars.githubusercontent.com/u/22320354?v=4", + "profile": "https://github.com/m3hm3t", + "contributions": [ + "code" + ] + }, + { + "login": "Laconty", + "name": "Roman Maklakov", + "avatar_url": "https://avatars.githubusercontent.com/u/17760166?v=4", + "profile": "https://github.com/Laconty", + "contributions": [ + "code" + ] + }, + { + "login": "genevieve", + "name": "Genevieve", + "avatar_url": "https://avatars.githubusercontent.com/u/12158641?v=4", + "profile": "https://github.com/genevieve", + "contributions": [ + "code" + ] + }, + { + "login": "cjraa", + "name": "Clarence", + "avatar_url": "https://avatars.githubusercontent.com/u/62199269?v=4", + "profile": "https://github.com/cjraa", + "contributions": [ + "code" + ] + }, + { + "login": "iamnande", + "name": "Nicholas Anderson", + "avatar_url": "https://avatars.githubusercontent.com/u/7806510?v=4", + "profile": "https://www.linkedin.com/in/iamnande/", + "contributions": [ + "code" + ] + }, + { + "login": "hezhizhen", + "name": "Zhizhen He", + "avatar_url": 
"https://avatars.githubusercontent.com/u/7611700?v=4", + "profile": "https://github.com/hezhizhen", + "contributions": [ + "code" + ] + }, + { + "login": "crossworth", + "name": "Pedro Henrique", + "avatar_url": "https://avatars.githubusercontent.com/u/1251151?v=4", + "profile": "https://pedro.dev.br", + "contributions": [ + "code" + ] + }, + { + "login": "MrParano1d", + "name": "MrParano1d", + "avatar_url": "https://avatars.githubusercontent.com/u/7414374?v=4", + "profile": "https://2jp.de", + "contributions": [ + "code" + ] + }, + { + "login": "tprebs", + "name": "Thomas Prebble", + "avatar_url": "https://avatars.githubusercontent.com/u/6523587?v=4", + "profile": "https://github.com/tprebs", + "contributions": [ + "code" + ] + }, + { + "login": "imhuytq", + "name": "Huy TQ", + "avatar_url": "https://avatars.githubusercontent.com/u/5723282?v=4", + "profile": "https://huytq.com", + "contributions": [ + "code" + ] + }, + { + "login": "maorlipchuk", + "name": "maorlipchuk", + "avatar_url": "https://avatars.githubusercontent.com/u/7034637?v=4", + "profile": "https://github.com/maorlipchuk", + "contributions": [ + "code" + ] + }, + { + "login": "iwata", + "name": "Motonori Iwata", + "avatar_url": "https://avatars.githubusercontent.com/u/121048?v=4", + "profile": "https://mobcov.hatenadiary.org/", + "contributions": [ + "doc" + ] + }, + { + "login": "CharlesGe129", + "name": "Charles Ge", + "avatar_url": "https://avatars.githubusercontent.com/u/20162173?v=4", + "profile": "https://github.com/CharlesGe129", + "contributions": [ + "code" + ] + }, + { + "login": "thmeitz", + "name": "Thomas Meitz", + "avatar_url": "https://avatars.githubusercontent.com/u/92851940?v=4", + "profile": "https://github.com/thmeitz", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "booleangate", + "name": "Justin Johnson", + "avatar_url": "https://avatars.githubusercontent.com/u/181567?v=4", + "profile": "http://justinjohnson.org", + "contributions": [ + "code" + ] + }, + { + "login": "hax10", + "name": "hax10", + "avatar_url": "https://avatars.githubusercontent.com/u/85743468?v=4", + "profile": "https://github.com/hax10", + "contributions": [ + "code" + ] + }, + { + "login": "water-a", + "name": "water-a", + "avatar_url": "https://avatars.githubusercontent.com/u/38114545?v=4", + "profile": "https://github.com/water-a", + "contributions": [ + "bug" + ] + }, + { + "login": "jhwz", + "name": "jhwz", + "avatar_url": "https://avatars.githubusercontent.com/u/52683873?v=4", + "profile": "https://github.com/jhwz", + "contributions": [ + "doc" + ] + }, + { + "login": "kortschak", + "name": "Dan Kortschak", + "avatar_url": "https://avatars.githubusercontent.com/u/275221?v=4", + "profile": "https://kortschak.io/", + "contributions": [ + "doc" + ] + } + ], + "contributorsPerLine": 7, + "projectName": "ent", + "projectOwner": "ent", + "repoType": "github", + "repoHost": "https://github.com", + "skipCi": true +} diff --git a/vendor/entgo.io/ent/.golangci.yml b/vendor/entgo.io/ent/.golangci.yml new file mode 100644 index 00000000..e827f6e1 --- /dev/null +++ b/vendor/entgo.io/ent/.golangci.yml @@ -0,0 +1,73 @@ +run: + go: '1.19' + timeout: 5m + +linters-settings: + errcheck: + ignore: fmt:.*,Read|Write|Close|Exec,io:Copy + dupl: + threshold: 100 + funlen: + lines: 140 + statements: 140 + goheader: + template: |- + Copyright 2019-present Facebook Inc. All rights reserved. + This source code is licensed under the Apache 2.0 license found + in the LICENSE file in the root directory of this source tree. 
+linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - funlen + - gocritic + # - gofmt; Enable back when upgrading CI to Go 1.20. + - goheader + - gosec + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + - whitespace + +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + - funlen + - gosec + - gocritic + - linters: + - unused + source: ent.Schema + - path: dialect/sql/schema + linters: + - dupl + - gosec + - text: "Expect WriteFile permissions to be 0600 or less" + linters: + - gosec + - path: privacy/privacy.go + linters: + - stylecheck + - path: entc/load/schema.go + linters: + - staticcheck + - path: entc/gen/graph.go + linters: + - gocritic + - path: \.go + linters: + - staticcheck + text: SA1019 diff --git a/vendor/entgo.io/ent/BUILD b/vendor/entgo.io/ent/BUILD new file mode 100644 index 00000000..9d90a5c0 --- /dev/null +++ b/vendor/entgo.io/ent/BUILD @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ent", + srcs = [ + "ent.go", + "op_string.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent", + importpath = "entgo.io/ent", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/schema", + "//vendor/entgo.io/ent/schema/edge", + "//vendor/entgo.io/ent/schema/field", + "//vendor/entgo.io/ent/schema/index", + ], +) diff --git a/vendor/entgo.io/ent/CODE_OF_CONDUCT.md b/vendor/entgo.io/ent/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..d1abc700 --- /dev/null +++ b/vendor/entgo.io/ent/CODE_OF_CONDUCT.md @@ -0,0 +1,77 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies within all project spaces, and it also applies when
+an individual is representing the project or its community in public spaces.
+Examples of representing a project or community include using an official
+project e-mail address, posting via an official social media account, or acting
+as an appointed representative at an online or offline event. Representation of
+a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at . All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
+
diff --git a/vendor/entgo.io/ent/CONTRIBUTING.md b/vendor/entgo.io/ent/CONTRIBUTING.md
new file mode 100644
index 00000000..80de6ad8
--- /dev/null
+++ b/vendor/entgo.io/ent/CONTRIBUTING.md
@@ -0,0 +1,70 @@
+# Contributing to ent
+We want to make contributing to this project as easy and transparent as
+possible.
+
+# Project structure
+
+- `dialect` - Contains SQL and Gremlin code used by the generated code.
+  - `dialect/sql/schema` - Auto migration logic resides there.
+  - `dialect/sql/sqljson` - JSON extension for SQL.
+
+- `schema` - User schema API.
+  - `schema/{field, edge, index, mixin}` - provides schema builders API.
+  - `schema/field/gen` - Templates and codegen for numeric builders.
+
+- `entc` - Codegen of `ent`.
+  - `entc/load` - `entc` loader API for loading user schemas into Go objects at runtime.
+  - `entc/gen` - The actual code generation logic resides in this package (and its `templates` package).
+  - `integration` - Integration tests for `entc`.
+
+- `privacy` - Runtime code for [privacy layer](https://entgo.io/docs/privacy/).
+
+- `doc` - Documentation code for `entgo.io` (uses [Docusaurus](https://docusaurus.io)).
+  - `doc/md` - Markdown files for documentation.
+  - `doc/website` - Website code and assets.
+
+  In order to test your documentation changes, run `npm start` from the `doc/website` directory, and open [localhost:3000](http://localhost:3000/).
+
+# Run integration tests
+If you touch any file in `entc`, run the following command in `entc`:
+
+```
+go generate ./...
+``` + +Then, in `entc/integration` run `docker-compose` in order to spin-up all database containers: + +``` +docker-compose -f docker-compose.yaml up -d +``` + +Then, run `go test ./...` to run all integration tests. + + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `master`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to ent, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. diff --git a/vendor/entgo.io/ent/LICENSE b/vendor/entgo.io/ent/LICENSE new file mode 100644 index 00000000..7a4a3ea2 --- /dev/null +++ b/vendor/entgo.io/ent/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/entgo.io/ent/README.md b/vendor/entgo.io/ent/README.md new file mode 100644 index 00000000..ad0af07d --- /dev/null +++ b/vendor/entgo.io/ent/README.md @@ -0,0 +1,58 @@ +## ent - An Entity Framework For Go + +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/entgo_io.svg?style=social&label=Follow%20%40entgo_io)](https://twitter.com/entgo_io) +[![Discord](https://img.shields.io/discord/885059418646003782?label=discord&logo=discord&style=flat-square&logoColor=white)](https://discord.gg/qZmPgTE6RX) + +[English](README.md) | [中文](README_zh.md) | [日本語](README_jp.md) | [한국어](README_kr.md) + + + +Simple, yet powerful entity framework for Go, that makes it easy to build and maintain applications +with large data-models. + +- **Schema As Code** - model any database schema as Go objects. +- **Easily Traverse Any Graph** - run queries, aggregations and traverse any graph structure easily. 
+- **Statically Typed And Explicit API** - 100% statically typed and explicit API using code generation.
+- **Multi Storage Driver** - supports MySQL, MariaDB, TiDB, PostgreSQL, CockroachDB, SQLite and Gremlin.
+- **Extendable** - simple to extend and customize using Go templates.
+
+## Quick Installation
+```console
+go install entgo.io/ent/cmd/ent@latest
+```
+
+For proper installation using [Go modules], visit [entgo.io website][entgo install].
+
+## Docs and Support
+The documentation for developing and using ent is available at: https://entgo.io
+
+For discussion and support, [open an issue](https://github.com/ent/ent/issues/new/choose) or join our [channel](https://gophers.slack.com/archives/C01FMSQDT53) in the gophers Slack.
+
+## Join the ent Community
+Building `ent` would not have been possible without the collective work of our entire community. We maintain a [contributors page](doc/md/contributors.md)
+which lists the contributors to `ent`.
+
+In order to contribute to `ent`, see the [CONTRIBUTING](CONTRIBUTING.md) file for how to get started.
+If your company or your product is using `ent`, please let us know by adding yourself to the [ent users page](https://github.com/ent/ent/wiki/ent-users).
+
+For updates, follow us on Twitter at https://twitter.com/entgo_io
+
+
+
+## About the Project
+The `ent` project was inspired by Ent, an entity framework we use internally. It is developed and maintained
+by [a8m](https://github.com/a8m) and [alexsn](https://github.com/alexsn)
+from the [Facebook Connectivity][fbc] team. It is used by multiple teams and projects in production,
+and the roadmap for its v1 release is described [here](https://github.com/ent/ent/issues/46).
+Read more about the motivation of the project [here](https://entgo.io/blog/2019/10/03/introducing-ent).
+
+## License
+ent is licensed under Apache 2.0 as found in the [LICENSE file](LICENSE).
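To make the "Schema As Code" bullet above concrete, here is a minimal, illustrative schema sketch; the `User` type and its fields are hypothetical examples, while `ent.Schema` and the `field` builders are the APIs vendored in this patch:

```go
package schema

import (
	"entgo.io/ent"
	"entgo.io/ent/schema/field"
)

// User is a hypothetical entity: ent models it as a plain Go struct
// that embeds ent.Schema.
type User struct {
	ent.Schema
}

// Fields declares the typed columns of the entity; each builder below
// maps to a column in the generated database schema.
func (User) Fields() []ent.Field {
	return []ent.Field{
		field.String("name"),
		field.Int("age").Positive(),
	}
}
```

Running the `ent` CLI installed above over a package of such schemas (e.g. `ent generate ./schema`) emits the statically typed client API the feature list describes.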
+ + +[entgo install]: https://entgo.io/docs/code-gen/#version-compatibility-between-entc-and-ent +[Go modules]: https://github.com/golang/go/wiki/Modules#quick-start +[fbc]: https://connectivity.fb.com diff --git a/vendor/entgo.io/ent/README_jp.md b/vendor/entgo.io/ent/README_jp.md new file mode 100644 index 00000000..c1c1c117 --- /dev/null +++ b/vendor/entgo.io/ent/README_jp.md @@ -0,0 +1,54 @@ +## ent - Goのエンティティーフレームワーク + +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/entgo_io.svg?style=social&label=Follow%20%40entgo_io)](https://twitter.com/entgo_io) + +[English](README.md) | [中文](README_zh.md) | [日本語](README_jp.md) + + + +シンプルながらもパワフルなGoのエンティティフレームワークであり、大規模なデータモデルを持つアプリケーションを容易に構築・保守できるようにします。 + +- **Schema As Code(コードとしてのスキーマ)** - あらゆるデータベーススキーマをGoオブジェクトとしてモデル化します。 +- **任意のグラフを簡単にトラバースできます** - クエリや集約の実行、任意のグラフ構造の走査を容易に実行できます。 +- **100%静的に型付けされた明示的なAPI** - コード生成により、100%静的に型付けされた曖昧さのないAPIを提供します。 +- **マルチストレージドライバ** - MySQL、MariaDB、 TiDB、PostgreSQL、CockroachDB、SQLite、Gremlinをサポートしています。 +- **拡張性** - Goテンプレートを使用して簡単に拡張、カスタマイズできます。 + +## クイックインストール +```console +go install entgo.io/ent/cmd/ent@latest +``` + +[Go modules]を使ったインストールについては、[entgo.ioのWebサイト](https://entgo.io/ja/docs/code-gen/#entc-%E3%81%A8-ent-%E3%81%AE%E3%83%90%E3%83%BC%E3%82%B8%E3%83%A7%E3%83%B3%E3%82%92%E4%B8%80%E8%87%B4%E3%81%95%E3%81%9B%E3%82%8B)をご覧ください。 + +## ドキュメントとサポート +entを開発・使用するためのドキュメントは、こちら: https://entgo.io + +議論やサポートについては、[Issueを開く](https://github.com/ent/ent/issues/new/choose)か、gophers Slackの[チャンネル](https://gophers.slack.com/archives/C01FMSQDT53)に参加してください。 + +## entコミュニティへの参加 +`ent`の構築は、コミュニティ全体の協力なしには実現できませんでした。 私たちは、この`ent`の貢献者をリストアップした[contributorsページ](doc/md/contributors.md)を管理しています。 + +`ent`に貢献するときは、まず[CONTRIBUTING](CONTRIBUTING.md)を参照してください。 +もし、あなたの会社や製品で`ent`を利用している場合は、[ent usersページ](https://github.com/ent/ent/wiki/ent-users)に追記する形で、そのことをぜひ教えて下さい。 + +最新情報については、Twitter()をフォローしてください。 + + + +## プロジェクトについて +`ent`プロジェクトは、私たちが社内で使用しているエンティティフレームワークであるEntからインスピレーションを得ています。 +entは、[Facebook Connectivity][fbc]チームの[a8m](https://github.com/a8m)と[alexsn](https://github.com/alexsn)が開発・保守しています。 +本番環境では複数のチームやプロジェクトで使用されており、v1リリースまでのロードマップは[こちら](https://github.com/ent/ent/issues/46)に記載されています。 +このプロジェクトの動機については[こちら](https://entgo.io/blog/2019/10/03/introducing-ent)をご覧ください。 + +## ライセンス +entは、[LICENSEファイル](LICENSE)にもある通り、Apache 2.0でライセンスされています。 + + +[entgo instal]: https://entgo.io/docs/code-gen/#version-compatibility-between-entc-and-ent +[Go modules]: https://github.com/golang/go/wiki/Modules#quick-start +[fbc]: https://connectivity.fb.com diff --git a/vendor/entgo.io/ent/README_kr.md b/vendor/entgo.io/ent/README_kr.md new file mode 100644 index 00000000..12b09068 --- /dev/null +++ b/vendor/entgo.io/ent/README_kr.md @@ -0,0 +1,52 @@ +## ent - An Entity Framework For Go + +[English](README.md) | [中文](README_zh.md) | [日本語](README_jp.md) | [한국어](README_kr.md) + + + +간단하지만 강력한 Go용 엔터티 프레임워크로, 대규모 데이터 모델이 포함된 애플리케이션을 쉽게 만들고 유지할 수 있습니다. + +- **스키마를 코드로 관리** - 모든 데이터베이스 스키마와 모델을 Go Object로 구현 가능. +- **어떤 그래프든 쉽게 탐색가능** - 쿼리실행, 집계, 그래프구조를 쉽게 탐색 가능. +- **정적 타입 그리고 명시적인 API** - 100% 생성된 코드로, 정적타입과 명시적인 API를 제공. +- **다양한 스토리지 드라이버** - MySQL, MariaDB, TiDB, PostgreSQL, CockroachDB, SQLite and Gremlin 를 지원 +- **확장성** - Go 템플릿을 이용하여 간단하게 확장, 커스터마이징 가능. + +## 빠른 설치 + +```console +go install entgo.io/ent/cmd/ent@latest +``` + +[Go modules]을 사용하여 바르게 설치하려면, [entgo.io 웹페이지][entgo install]를 방문해주시길 바랍니다. + +## 문서 및 지원 + +Ent 개발 및 사용에 관한 문서는 여기서 확인할 수 있습니다. 
: https://entgo.io + +토론, 지원을 위해서 [open an issue](https://github.com/ent/ent/issues/new/choose)깃허브 이슈 또는 gophers Slack [채널](https://gophers.slack.com/archives/C01FMSQDT53)에 가입해주세요. + +## ent 커뮤니티 가입 + +ent 커뮤니티의 공동작업이 없었다면, ent를 만들 수 없었을 것입니다. 우리는 기여한 사람들을 [contributors 페이지](doc/md/contributors.md)에 올리고 유지합니다. + +ent에 기여하려면 [CONTRIBUTING](CONTRIBUTING.md)에서 시작 방법을 확인해보세요. +프로젝트나 회사에서 ent를 사용중이면, [ent 유저 페이지](https://github.com/ent/ent/wiki/ent-users)에 추가하여 알려주세요. + +트위터계정을 팔로우하여 업데이트 소식을 확인하세요. https://twitter.com/entgo_io + +## 프로젝트에 관하여 + +ent프로젝트는 내부적으로 사용하는 엔터티 프레임워크 "Ent"에서 영감을 받았습니다. 개발 및 유지보수는 [a8m](https://github.com/a8m) 및 [alexsn](https://github.com/alexsn)[Facebook Connectivity][fbc] 팀에서 담당합니다. 여러 팀이 프로덕션 환경에서 사용하고 있습니다. v1 릴리즈 로드맵에 대한 설명은 [여기](https://github.com/ent/ent/issues/46)를 클릭해주세요. +프로젝트 동기에 대해 더 궁금하시다면 [여기](https://entgo.io/blog/2019/10/03/introducing-ent)를 클릭해주세요. + +## 라이센스 + +ent 라이센스는 Apache 2.0입니다. [LICENSE file](LICENSE)파일에서도 확인 가능합니다. + +[entgo install]: https://entgo.io/docs/code-gen/#version-compatibility-between-entc-and-ent +[go modules]: https://github.com/golang/go/wiki/Modules#quick-start +[fbc]: https://connectivity.fb.com diff --git a/vendor/entgo.io/ent/README_zh.md b/vendor/entgo.io/ent/README_zh.md new file mode 100644 index 00000000..05034b08 --- /dev/null +++ b/vendor/entgo.io/ent/README_zh.md @@ -0,0 +1,44 @@ +## ent - 一个强大的Go语言实体框架 + +[English](README.md) | [中文](README_zh.md) | [日本語](README_jp.md) + + + +ent是一个简单而又功能强大的Go语言实体框架,ent易于构建和维护应用程序与大数据模型。 + +- **图就是代码** - 将任何数据库表建模为Go对象。 +- **轻松地遍历任何图形** - 可以轻松地运行查询、聚合和遍历任何图形结构。 +- **静态类型和显式API** - 使用代码生成静态类型和显式API,查询数据更加便捷。 +- **多存储驱动程序** - 支持MySQL, PostgreSQL, SQLite 和 Gremlin。 +- **可扩展** - 简单地扩展和使用Go模板自定义。 + +## 快速安装 +```console +go install entgo.io/ent/cmd/ent@latest +``` + +请访问[entgo.io website][entgo instal]以使用[Go modules]进行正确安装。 + +## 文档和支持 +开发和使用ent的文档请参照: https://entgo.io + +如要讨论问题和支持, [创建一个issue](https://github.com/ent/ent/issues/new/choose) 或者加入我们的Gopher Slack(Slack软件,类似于论坛)[讨论组](https://gophers.slack.com/archives/C01FMSQDT53) + +## 加入 ent 社区 +如果你想为`ent`做出贡献, [贡献代码](CONTRIBUTING.md) 中写了如何做出自己的贡献 +如果你的公司或者产品在使用`ent`,请让我们知道你已经加入 [ent 用户](https://github.com/ent/ent/wiki/ent-users) + +## 关于项目 +`ent` 项目灵感来自于Ent,Ent是一个facebook内部使用的一个实体框架项目。 它由 [Facebook Connectivity][fbc] 团队通过 [a8m](https://github.com/a8m) 和 [alexsn](https://github.com/alexsn) 开发和维护 +, 它被生产中的多个团队和项目使用。它的v1版本的路线图为 [版本的路线图](https://github.com/ent/ent/issues/46). +关于项目更多的信息 [ent介绍](https://entgo.io/blog/2019/10/03/introducing-ent)。 + +## 声明 +ent使用Apache 2.0协议授权,可以在[LICENSE文件](LICENSE)中找到。 + +[entgo instal]: https://entgo.io/docs/code-gen/#version-compatibility-between-entc-and-ent +[Go modules]: https://github.com/golang/go/wiki/Modules#quick-start +[fbc]: https://connectivity.fb.com diff --git a/vendor/entgo.io/ent/dialect/BUILD b/vendor/entgo.io/ent/dialect/BUILD new file mode 100644 index 00000000..7371b720 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/BUILD @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "dialect", + srcs = ["dialect.go"], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect", + importpath = "entgo.io/ent/dialect", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/google/uuid"], +) diff --git a/vendor/entgo.io/ent/dialect/dialect.go b/vendor/entgo.io/ent/dialect/dialect.go new file mode 100644 index 00000000..33784634 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/dialect.go @@ -0,0 +1,208 @@ +// Copyright 2019-present Facebook Inc. 
All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package dialect + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "log" + + "github.com/google/uuid" +) + +// Dialect names for external usage. +const ( + MySQL = "mysql" + SQLite = "sqlite3" + Postgres = "postgres" + Gremlin = "gremlin" +) + +// ExecQuerier wraps the 2 database operations. +type ExecQuerier interface { + // Exec executes a query that does not return records. For example, in SQL, INSERT or UPDATE. + // It scans the result into the pointer v. For SQL drivers, it is dialect/sql.Result. + Exec(ctx context.Context, query string, args, v any) error + // Query executes a query that returns rows, typically a SELECT in SQL. + // It scans the result into the pointer v. For SQL drivers, it is *dialect/sql.Rows. + Query(ctx context.Context, query string, args, v any) error +} + +// Driver is the interface that wraps all necessary operations for ent clients. +type Driver interface { + ExecQuerier + // Tx starts and returns a new transaction. + // The provided context is used until the transaction is committed or rolled back. + Tx(context.Context) (Tx, error) + // Close closes the underlying connection. + Close() error + // Dialect returns the dialect name of the driver. + Dialect() string +} + +// Tx wraps the Exec and Query operations in transaction. +type Tx interface { + ExecQuerier + driver.Tx +} + +type nopTx struct { + Driver +} + +func (nopTx) Commit() error { return nil } +func (nopTx) Rollback() error { return nil } + +// NopTx returns a Tx with a no-op Commit / Rollback methods wrapping +// the provided Driver d. +func NopTx(d Driver) Tx { + return nopTx{d} +} + +// DebugDriver is a driver that logs all driver operations. +type DebugDriver struct { + Driver // underlying driver. + log func(context.Context, ...any) // log function. defaults to log.Println. +} + +// Debug gets a driver and an optional logging function, and returns +// a new debugged-driver that prints all outgoing operations. +func Debug(d Driver, logger ...func(...any)) Driver { + logf := log.Println + if len(logger) == 1 { + logf = logger[0] + } + drv := &DebugDriver{d, func(_ context.Context, v ...any) { logf(v...) }} + return drv +} + +// DebugWithContext gets a driver and a logging function, and returns +// a new debugged-driver that prints all outgoing operations with context. +func DebugWithContext(d Driver, logger func(context.Context, ...any)) Driver { + drv := &DebugDriver{d, logger} + return drv +} + +// Exec logs its params and calls the underlying driver Exec method. +func (d *DebugDriver) Exec(ctx context.Context, query string, args, v any) error { + d.log(ctx, fmt.Sprintf("driver.Exec: query=%v args=%v", query, args)) + return d.Driver.Exec(ctx, query, args, v) +} + +// ExecContext logs its params and calls the underlying driver ExecContext method if it is supported. +func (d *DebugDriver) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + drv, ok := d.Driver.(interface { + ExecContext(context.Context, string, ...any) (sql.Result, error) + }) + if !ok { + return nil, fmt.Errorf("Driver.ExecContext is not supported") + } + d.log(ctx, fmt.Sprintf("driver.ExecContext: query=%v args=%v", query, args)) + return drv.ExecContext(ctx, query, args...) +} + +// Query logs its params and calls the underlying driver Query method. 
+func (d *DebugDriver) Query(ctx context.Context, query string, args, v any) error {
+	d.log(ctx, fmt.Sprintf("driver.Query: query=%v args=%v", query, args))
+	return d.Driver.Query(ctx, query, args, v)
+}
+
+// QueryContext logs its params and calls the underlying driver QueryContext method if it is supported.
+func (d *DebugDriver) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) {
+	drv, ok := d.Driver.(interface {
+		QueryContext(context.Context, string, ...any) (*sql.Rows, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Driver.QueryContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("driver.QueryContext: query=%v args=%v", query, args))
+	return drv.QueryContext(ctx, query, args...)
+}
+
+// Tx adds a log-id for the transaction and calls the underlying driver Tx method.
+func (d *DebugDriver) Tx(ctx context.Context) (Tx, error) {
+	tx, err := d.Driver.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	id := uuid.New().String()
+	d.log(ctx, fmt.Sprintf("driver.Tx(%s): started", id))
+	return &DebugTx{tx, id, d.log, ctx}, nil
+}
+
+// BeginTx adds a log-id for the transaction and calls the underlying driver BeginTx method if it is supported.
+func (d *DebugDriver) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
+	drv, ok := d.Driver.(interface {
+		BeginTx(context.Context, *sql.TxOptions) (Tx, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Driver.BeginTx is not supported")
+	}
+	tx, err := drv.BeginTx(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+	id := uuid.New().String()
+	d.log(ctx, fmt.Sprintf("driver.BeginTx(%s): started", id))
+	return &DebugTx{tx, id, d.log, ctx}, nil
+}
+
+// DebugTx is a transaction implementation that logs all transaction operations.
+type DebugTx struct {
+	Tx                                // underlying transaction.
+	id  string                        // transaction logging id.
+	log func(context.Context, ...any) // log function. defaults to log.Println.
+	ctx context.Context               // underlying transaction context.
+}
+
+// Exec logs its params and calls the underlying transaction Exec method.
+func (d *DebugTx) Exec(ctx context.Context, query string, args, v any) error {
+	d.log(ctx, fmt.Sprintf("Tx(%s).Exec: query=%v args=%v", d.id, query, args))
+	return d.Tx.Exec(ctx, query, args, v)
+}
+
+// ExecContext logs its params and calls the underlying transaction ExecContext method if it is supported.
+func (d *DebugTx) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) {
+	drv, ok := d.Tx.(interface {
+		ExecContext(context.Context, string, ...any) (sql.Result, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.ExecContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("Tx(%s).ExecContext: query=%v args=%v", d.id, query, args))
+	return drv.ExecContext(ctx, query, args...)
+}
+
+// Query logs its params and calls the underlying transaction Query method.
+func (d *DebugTx) Query(ctx context.Context, query string, args, v any) error {
+	d.log(ctx, fmt.Sprintf("Tx(%s).Query: query=%v args=%v", d.id, query, args))
+	return d.Tx.Query(ctx, query, args, v)
+}
+
+// QueryContext logs its params and calls the underlying transaction QueryContext method if it is supported.
+func (d *DebugTx) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) {
+	drv, ok := d.Tx.(interface {
+		QueryContext(context.Context, string, ...any) (*sql.Rows, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.QueryContext is not supported")
+	}
+	d.log(ctx, fmt.Sprintf("Tx(%s).QueryContext: query=%v args=%v", d.id, query, args))
+	return drv.QueryContext(ctx, query, args...)
+}
+
+// Commit logs this step and calls the underlying transaction Commit method.
+func (d *DebugTx) Commit() error {
+	d.log(d.ctx, fmt.Sprintf("Tx(%s): committed", d.id))
+	return d.Tx.Commit()
+}
+
+// Rollback logs this step and calls the underlying transaction Rollback method.
+func (d *DebugTx) Rollback() error {
+	d.log(d.ctx, fmt.Sprintf("Tx(%s): rolled back", d.id))
+	return d.Tx.Rollback()
+}
diff --git a/vendor/entgo.io/ent/dialect/entsql/BUILD b/vendor/entgo.io/ent/dialect/entsql/BUILD
new file mode 100644
index 00000000..bb7bc681
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/entsql/BUILD
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "entsql",
+    srcs = ["annotation.go"],
+    importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect/entsql",
+    importpath = "entgo.io/ent/dialect/entsql",
+    visibility = ["//visibility:public"],
+    deps = ["//vendor/entgo.io/ent/schema"],
+)
diff --git a/vendor/entgo.io/ent/dialect/entsql/annotation.go b/vendor/entgo.io/ent/dialect/entsql/annotation.go
new file mode 100644
index 00000000..c03cb0dc
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/entsql/annotation.go
@@ -0,0 +1,685 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package entsql
+
+import "entgo.io/ent/schema"
+
+// Annotation is a builtin schema annotation for attaching
+// SQL metadata to schema objects for both codegen and runtime.
+type Annotation struct {
+	// The Table option allows overriding the default table
+	// name that is generated by ent. For example:
+	//
+	//	entsql.Annotation{
+	//		Table: "Users",
+	//	}
+	//
+	Table string `json:"table,omitempty"`
+
+	// Charset defines the character-set of the table. For example:
+	//
+	//	entsql.Annotation{
+	//		Charset: "utf8mb4",
+	//	}
+	//
+	Charset string `json:"charset,omitempty"`
+
+	// Collation defines the collation of the table (a set of rules for comparing
+	// characters in a character set). For example:
+	//
+	//	entsql.Annotation{
+	//		Collation: "utf8mb4_bin",
+	//	}
+	//
+	Collation string `json:"collation,omitempty"`
+
+	// Default specifies a literal default value of a column. Note that using
+	// this option overrides the default behavior of the code-generation.
+	//
+	//	entsql.Annotation{
+	//		Default: `{"key":"value"}`,
+	//	}
+	//
+	Default string `json:"default,omitempty"`
+
+	// DefaultExpr specifies an expression default value of a column. Using this option,
+	// users can define custom expressions to be set as database default values. Note that
+	// using this option overrides the default behavior of the code-generation.
+	//
+	//	entsql.Annotation{
+	//		DefaultExpr: "CURRENT_TIMESTAMP",
+	//	}
+	//
+	//	entsql.Annotation{
+	//		DefaultExpr: "uuid_generate_v4()",
+	//	}
+	//
+	//	entsql.Annotation{
+	//		DefaultExpr: "(a + b)",
+	//	}
+	//
+	DefaultExpr string `json:"default_expr,omitempty"`
+
+	// DefaultExprs specifies an expression default value of a column per dialect.
+ // See, DefaultExpr for full doc. + // + // entsql.Annotation{ + // DefaultExprs: map[string]string{ + // dialect.MySQL: "uuid()", + // dialect.Postgres: "uuid_generate_v4", + // } + // + DefaultExprs map[string]string `json:"default_exprs,omitempty"` + + // Options defines the additional table options. For example: + // + // entsql.Annotation{ + // Options: "ENGINE = INNODB", + // } + // + Options string `json:"options,omitempty"` + + // Size defines the column size in the generated schema. For example: + // + // entsql.Annotation{ + // Size: 128, + // } + // + Size int64 `json:"size,omitempty"` + + // WithComments specifies whether fields' comments should + // be stored in the database schema as column comments. + // + // withCommentsEnabled := true + // entsql.WithComments{ + // WithComments: &withCommentsEnabled, + // } + // + WithComments *bool `json:"with_comments,omitempty"` + + // Incremental defines the auto-incremental behavior of a column. For example: + // + // incrementalEnabled := true + // entsql.Annotation{ + // Incremental: &incrementalEnabled, + // } + // + // By default, this value is nil defaulting to whatever best fits each scenario. + // + Incremental *bool `json:"incremental,omitempty"` + + // OnDelete specifies a custom referential action for DELETE operations on parent + // table that has matching rows in the child table. + // + // For example, in order to delete rows from the parent table and automatically delete + // their matching rows in the child table, pass the following annotation: + // + // entsql.Annotation{ + // OnDelete: entsql.Cascade, + // } + // + OnDelete ReferenceOption `json:"on_delete,omitempty"` + + // Check allows injecting custom "DDL" for setting an unnamed "CHECK" clause in "CREATE TABLE". + // + // entsql.Annotation{ + // Check: "age < 10", + // } + // + Check string `json:"check,omitempty"` + + // Checks allows injecting custom "DDL" for setting named "CHECK" clauses in "CREATE TABLE". + // + // entsql.Annotation{ + // Checks: map[string]string{ + // "valid_discount": "price > discount_price", + // }, + // } + // + Checks map[string]string `json:"checks,omitempty"` +} + +// Name describes the annotation name. +func (Annotation) Name() string { + return "EntSQL" +} + +// Check allows injecting custom "DDL" for setting an unnamed "CHECK" clause in "CREATE TABLE". +// +// entsql.Annotation{ +// Check: "(`age` < 10)", +// } +func Check(c string) *Annotation { + return &Annotation{ + Check: c, + } +} + +// Checks allows injecting custom "DDL" for setting named "CHECK" clauses in "CREATE TABLE". +// +// entsql.Annotation{ +// Checks: map[string]string{ +// "valid_discount": "price > discount_price", +// }, +// } +func Checks(c map[string]string) *Annotation { + return &Annotation{ + Checks: c, + } +} + +// Default specifies a literal default value of a column. Note that using +// this option overrides the default behavior of the code-generation. +// +// entsql.Annotation{ +// Default: `{"key":"value"}`, +// } +func Default(literal string) *Annotation { + return &Annotation{ + Default: literal, + } +} + +// DefaultExpr specifies an expression default value for the annotated column. +// Using this option, users can define custom expressions to be set as database +// default values.Note that using this option overrides the default behavior of +// the code-generation. +// +// field.UUID("id", uuid.Nil). +// Default(uuid.New). 
+// Annotations( +// entsql.DefaultExpr("uuid_generate_v4()"), +// ) +func DefaultExpr(expr string) *Annotation { + return &Annotation{ + DefaultExpr: expr, + } +} + +// DefaultExprs specifies an expression default value for the annotated +// column per dialect. See, DefaultExpr for full doc. +// +// field.UUID("id", uuid.Nil). +// Default(uuid.New). +// Annotations( +// entsql.DefaultExprs(map[string]string{ +// dialect.MySQL: "uuid()", +// dialect.Postgres: "uuid_generate_v4()", +// }), +// ) +func DefaultExprs(exprs map[string]string) *Annotation { + return &Annotation{ + DefaultExprs: exprs, + } +} + +// WithComments specifies whether fields' comments should +// be stored in the database schema as column comments. +// +// func (T) Annotations() []schema.Annotation { +// return []schema.Annotation{ +// entsql.WithComments(true), +// } +// } +func WithComments(b bool) *Annotation { + return &Annotation{ + WithComments: &b, + } +} + +// OnDelete specifies a custom referential action for DELETE operations on parent +// table that has matching rows in the child table. +// +// For example, in order to delete rows from the parent table and automatically delete +// their matching rows in the child table, pass the following annotation: +// +// func (T) Annotations() []schema.Annotation { +// return []schema.Annotation{ +// entsql.OnDelete(entsql.Cascade), +// } +// } +func OnDelete(opt ReferenceOption) *Annotation { + return &Annotation{ + OnDelete: opt, + } +} + +// Merge implements the schema.Merger interface. +func (a Annotation) Merge(other schema.Annotation) schema.Annotation { + var ant Annotation + switch other := other.(type) { + case Annotation: + ant = other + case *Annotation: + if other != nil { + ant = *other + } + default: + return a + } + if t := ant.Table; t != "" { + a.Table = t + } + if c := ant.Charset; c != "" { + a.Charset = c + } + if c := ant.Collation; c != "" { + a.Collation = c + } + if d := ant.Default; d != "" { + a.Default = d + } + if d := ant.DefaultExpr; d != "" { + a.DefaultExpr = d + } + if d := ant.DefaultExprs; d != nil { + if a.DefaultExprs == nil { + a.DefaultExprs = make(map[string]string) + } + for dialect, x := range d { + a.DefaultExprs[dialect] = x + } + } + if o := ant.Options; o != "" { + a.Options = o + } + if s := ant.Size; s != 0 { + a.Size = s + } + if b := ant.WithComments; b != nil { + a.WithComments = b + } + if i := ant.Incremental; i != nil { + a.Incremental = i + } + if od := ant.OnDelete; od != "" { + a.OnDelete = od + } + if c := ant.Check; c != "" { + a.Check = c + } + if checks := ant.Checks; len(checks) > 0 { + if a.Checks == nil { + a.Checks = make(map[string]string) + } + for name, check := range checks { + a.Checks[name] = check + } + } + return a +} + +var _ interface { + schema.Annotation + schema.Merger +} = (*Annotation)(nil) + +// ReferenceOption for constraint actions. +type ReferenceOption string + +// Reference options (actions) specified by ON UPDATE and ON DELETE +// subclauses of the FOREIGN KEY clause. +const ( + NoAction ReferenceOption = "NO ACTION" + Restrict ReferenceOption = "RESTRICT" + Cascade ReferenceOption = "CASCADE" + SetNull ReferenceOption = "SET NULL" + SetDefault ReferenceOption = "SET DEFAULT" +) + +// IndexAnnotation is a builtin schema annotation for attaching +// SQL metadata to schema indexes for both codegen and runtime. +type IndexAnnotation struct { + // Prefix defines a column prefix for a single string column index. 
+ // In MySQL, the following annotation maps to: + // + // index.Fields("column"). + // Annotation(entsql.Prefix(100)) + // + // CREATE INDEX `table_column` ON `table`(`column`(100)) + // + Prefix uint + + // PrefixColumns defines column prefixes for a multi-column index. + // In MySQL, the following annotation maps to: + // + // index.Fields("c1", "c2", "c3"). + // Annotation( + // entsql.PrefixColumn("c1", 100), + // entsql.PrefixColumn("c2", 200), + // ) + // + // CREATE INDEX `table_c1_c2_c3` ON `table`(`c1`(100), `c2`(200), `c3`) + // + PrefixColumns map[string]uint + + // Desc defines the DESC clause for a single column index. + // In MySQL, the following annotation maps to: + // + // index.Fields("column"). + // Annotation(entsql.Desc()) + // + // CREATE INDEX `table_column` ON `table`(`column` DESC) + // + Desc bool + + // DescColumns defines the DESC clause for columns in multi-column index. + // In MySQL, the following annotation maps to: + // + // index.Fields("c1", "c2", "c3"). + // Annotation( + // entsql.DescColumns("c1", "c2"), + // ) + // + // CREATE INDEX `table_c1_c2_c3` ON `table`(`c1` DESC, `c2` DESC, `c3`) + // + DescColumns map[string]bool + + // IncludeColumns defines the INCLUDE clause for the index. + // Works only in Postgres and its definition is as follows: + // + // index.Fields("c1"). + // Annotation( + // entsql.IncludeColumns("c2"), + // ) + // + // CREATE INDEX "table_column" ON "table"("c1") INCLUDE ("c2") + // + IncludeColumns []string + + // Type defines the type of the index. + // In MySQL, the following annotation maps to: + // + // index.Fields("c1"). + // Annotation( + // entsql.IndexType("FULLTEXT"), + // ) + // + // CREATE FULLTEXT INDEX `table_c1` ON `table`(`c1`) + // + Type string + + // Types is like the Type option but allows mapping an index-type per dialect. + // + // index.Fields("c1"). + // Annotation( + // entsql.IndexTypes(map[string]string{ + // dialect.MySQL: "FULLTEXT", + // dialect.Postgres: "GIN", + // }), + // ) + // + Types map[string]string + + // OpClass defines the operator class for a single string column index. + // In PostgreSQL, the following annotation maps to: + // + // index.Fields("column"). + // Annotation( + // entsql.IndexType("BRIN"), + // entsql.OpClass("int8_bloom_ops"), + // ) + // + // CREATE INDEX "table_column" ON "table" USING BRIN ("column" int8_bloom_ops) + // + OpClass string + + // OpClassColumns defines operator-classes for a multi-column index. + // In PostgreSQL, the following annotation maps to: + // + // index.Fields("c1", "c2", "c3"). + // Annotation( + // entsql.IndexType("BRIN"), + // entsql.OpClassColumn("c1", "int8_bloom_ops"), + // entsql.OpClassColumn("c2", "int8_minmax_multi_ops(values_per_range=8)"), + // ) + // + // CREATE INDEX "table_column" ON "table" USING BRIN ("c1" int8_bloom_ops, "c2" int8_minmax_multi_ops(values_per_range=8), "c3") + // + OpClassColumns map[string]string + + // IndexWhere allows configuring partial indexes in SQLite and PostgreSQL. + // Read more: https://postgresql.org/docs/current/indexes-partial.html. + // + // Note that the `WHERE` clause should be defined exactly like it is + // stored in the database (i.e. normal form). Read more about this on + // the Atlas website: https://atlasgo.io/concepts/dev-database#diffing. + // + // index.Fields("a"). 
+ // Annotations( + // entsql.IndexWhere("b AND c > 0"), + // ) + // CREATE INDEX "table_a" ON "table"("a") WHERE (b AND c > 0) + Where string +} + +// Prefix returns a new index annotation with a single string column index. +// In MySQL, the following annotation maps to: +// +// index.Fields("column"). +// Annotation(entsql.Prefix(100)) +// +// CREATE INDEX `table_column` ON `table`(`column`(100)) +func Prefix(prefix uint) *IndexAnnotation { + return &IndexAnnotation{ + Prefix: prefix, + } +} + +// PrefixColumn returns a new index annotation with column prefix for +// multi-column indexes. In MySQL, the following annotation maps to: +// +// index.Fields("c1", "c2", "c3"). +// Annotation( +// entsql.PrefixColumn("c1", 100), +// entsql.PrefixColumn("c2", 200), +// ) +// +// CREATE INDEX `table_c1_c2_c3` ON `table`(`c1`(100), `c2`(200), `c3`) +func PrefixColumn(name string, prefix uint) *IndexAnnotation { + return &IndexAnnotation{ + PrefixColumns: map[string]uint{ + name: prefix, + }, + } +} + +// OpClass defines the operator class for a single string column index. +// In PostgreSQL, the following annotation maps to: +// +// index.Fields("column"). +// Annotation( +// entsql.IndexType("BRIN"), +// entsql.OpClass("int8_bloom_ops"), +// ) +// +// CREATE INDEX "table_column" ON "table" USING BRIN ("column" int8_bloom_ops) +func OpClass(op string) *IndexAnnotation { + return &IndexAnnotation{ + OpClass: op, + } +} + +// OpClassColumn returns a new index annotation with column operator +// class for multi-column indexes. In PostgreSQL, the following annotation maps to: +// +// index.Fields("c1", "c2", "c3"). +// Annotation( +// entsql.IndexType("BRIN"), +// entsql.OpClassColumn("c1", "int8_bloom_ops"), +// entsql.OpClassColumn("c2", "int8_minmax_multi_ops(values_per_range=8)"), +// ) +// +// CREATE INDEX "table_column" ON "table" USING BRIN ("c1" int8_bloom_ops, "c2" int8_minmax_multi_ops(values_per_range=8), "c3") +func OpClassColumn(name, op string) *IndexAnnotation { + return &IndexAnnotation{ + OpClassColumns: map[string]string{ + name: op, + }, + } +} + +// Desc returns a new index annotation with the DESC clause for a +// single column index. In MySQL, the following annotation maps to: +// +// index.Fields("column"). +// Annotation(entsql.Desc()) +// +// CREATE INDEX `table_column` ON `table`(`column` DESC) +func Desc() *IndexAnnotation { + return &IndexAnnotation{ + Desc: true, + } +} + +// DescColumns returns a new index annotation with the DESC clause attached to +// the columns in the index. In MySQL, the following annotation maps to: +// +// index.Fields("c1", "c2", "c3"). +// Annotation( +// entsql.DescColumns("c1", "c2"), +// ) +// +// CREATE INDEX `table_c1_c2_c3` ON `table`(`c1` DESC, `c2` DESC, `c3`) +func DescColumns(names ...string) *IndexAnnotation { + ant := &IndexAnnotation{ + DescColumns: make(map[string]bool, len(names)), + } + for i := range names { + ant.DescColumns[names[i]] = true + } + return ant +} + +// IncludeColumns defines the INCLUDE clause for the index. +// Works only in Postgres and its definition is as follows: +// +// index.Fields("c1"). +// Annotation( +// entsql.IncludeColumns("c2"), +// ) +// +// CREATE INDEX "table_column" ON "table"("c1") INCLUDE ("c2") +func IncludeColumns(names ...string) *IndexAnnotation { + return &IndexAnnotation{IncludeColumns: names} +} + +// IndexType defines the type of the index. +// In MySQL, the following annotation maps to: +// +// index.Fields("c1"). 
+// Annotation( +// entsql.IndexType("FULLTEXT"), +// ) +// +// CREATE FULLTEXT INDEX `table_c1` ON `table`(`c1`) +func IndexType(t string) *IndexAnnotation { + return &IndexAnnotation{Type: t} +} + +// IndexTypes is like the Type option but allows mapping an index-type per dialect. +// +// index.Fields("c1"). +// Annotations( +// entsql.IndexTypes(map[string]string{ +// dialect.MySQL: "FULLTEXT", +// dialect.Postgres: "GIN", +// }), +// ) +func IndexTypes(types map[string]string) *IndexAnnotation { + return &IndexAnnotation{Types: types} +} + +// IndexWhere allows configuring partial indexes in SQLite and PostgreSQL. +// Read more: https://postgresql.org/docs/current/indexes-partial.html. +// +// Note that the `WHERE` clause should be defined exactly like it is +// stored in the database (i.e. normal form). Read more about this on the +// Atlas website: https://atlasgo.io/concepts/dev-database#diffing. +// +// index.Fields("a"). +// Annotations( +// entsql.IndexWhere("b AND c > 0"), +// ) +// CREATE INDEX "table_a" ON "table"("a") WHERE (b AND c > 0) +func IndexWhere(pred string) *IndexAnnotation { + return &IndexAnnotation{Where: pred} +} + +// Name describes the annotation name. +func (IndexAnnotation) Name() string { + return "EntSQLIndexes" +} + +// Merge implements the schema.Merger interface. +func (a IndexAnnotation) Merge(other schema.Annotation) schema.Annotation { + var ant IndexAnnotation + switch other := other.(type) { + case IndexAnnotation: + ant = other + case *IndexAnnotation: + if other != nil { + ant = *other + } + default: + return a + } + if ant.Prefix != 0 { + a.Prefix = ant.Prefix + } + if ant.PrefixColumns != nil { + if a.PrefixColumns == nil { + a.PrefixColumns = make(map[string]uint) + } + for column, prefix := range ant.PrefixColumns { + a.PrefixColumns[column] = prefix + } + } + if ant.OpClass != "" { + a.OpClass = ant.OpClass + } + if ant.OpClassColumns != nil { + if a.OpClassColumns == nil { + a.OpClassColumns = make(map[string]string) + } + for column, op := range ant.OpClassColumns { + a.OpClassColumns[column] = op + } + } + if ant.Desc { + a.Desc = ant.Desc + } + if ant.DescColumns != nil { + if a.DescColumns == nil { + a.DescColumns = make(map[string]bool) + } + for column, desc := range ant.DescColumns { + a.DescColumns[column] = desc + } + } + if ant.IncludeColumns != nil { + a.IncludeColumns = append(a.IncludeColumns, ant.IncludeColumns...) 
+	}
+	if ant.Type != "" {
+		a.Type = ant.Type
+	}
+	if ant.Types != nil {
+		if a.Types == nil {
+			a.Types = make(map[string]string)
+		}
+		for dialect, t := range ant.Types {
+			a.Types[dialect] = t
+		}
+	}
+	if ant.Where != "" {
+		a.Where = ant.Where
+	}
+	return a
+}
+
+var _ interface {
+	schema.Annotation
+	schema.Merger
+} = (*IndexAnnotation)(nil)
diff --git a/vendor/entgo.io/ent/dialect/sql/BUILD b/vendor/entgo.io/ent/dialect/sql/BUILD
new file mode 100644
index 00000000..1b0f5de2
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/BUILD
@@ -0,0 +1,15 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "sql",
+    srcs = [
+        "builder.go",
+        "driver.go",
+        "scan.go",
+        "sql.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect/sql",
+    importpath = "entgo.io/ent/dialect/sql",
+    visibility = ["//visibility:public"],
+    deps = ["//vendor/entgo.io/ent/dialect"],
)
diff --git a/vendor/entgo.io/ent/dialect/sql/builder.go b/vendor/entgo.io/ent/dialect/sql/builder.go
new file mode 100644
index 00000000..6d9537df
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/builder.go
@@ -0,0 +1,3996 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+// Package sql provides wrappers around the standard database/sql package
+// to allow the generated code to interact with a statically-typed API.
+//
+// Users that are interacting with this package should be aware that the
+// following builders don't check the given SQL syntax, nor do they validate
+// or escape user-inputs. Almost all validations are expected to happen in
+// the generated ent package.
package sql
+
+import (
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"entgo.io/ent/dialect"
+)
+
+// Querier wraps the basic Query method that is implemented
+// by the different builders in this file.
+type Querier interface {
+	// Query returns the query representation of the element
+	// and its arguments (if any).
+	Query() (string, []any)
+}
+
+// querierErr allows propagating a Querier's inner error.
+type querierErr interface {
+	Err() error
+}
+
+// ColumnBuilder is a builder for column definitions in table creation.
+type ColumnBuilder struct {
+	Builder
+	typ    string             // column type.
+	name   string             // column name.
+	attr   string             // extra attributes.
+	modify bool               // modify existing.
+	fk     *ForeignKeyBuilder // foreign-key constraint.
+	check  func(*Builder)     // column checks.
+}
+
+// Column returns a new ColumnBuilder with the given name.
+//
+//	sql.Column("group_id").Type("int").Attr("UNIQUE")
+func Column(name string) *ColumnBuilder { return &ColumnBuilder{name: name} }
+
+// Type sets the column type.
+func (c *ColumnBuilder) Type(t string) *ColumnBuilder {
+	c.typ = t
+	return c
+}
+
+// Attr sets an extra attribute for the column, like UNIQUE or AUTO_INCREMENT.
+func (c *ColumnBuilder) Attr(attr string) *ColumnBuilder {
+	if c.attr != "" && attr != "" {
+		c.attr += " "
+	}
+	c.attr += attr
+	return c
+}
+
+// Constraint adds the CONSTRAINT clause to the ADD COLUMN statement in SQLite.
+func (c *ColumnBuilder) Constraint(fk *ForeignKeyBuilder) *ColumnBuilder {
+	c.fk = fk
+	return c
+}
+
+// Check adds a CHECK clause to the ADD COLUMN statement.
+func (c *ColumnBuilder) Check(check func(*Builder)) *ColumnBuilder {
+	c.check = check
+	return c
+}
+
+// Query returns query representation of a Column.
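+//
+// As an illustrative sketch (not part of the upstream docs; assumes the
+// default dialect, which quotes identifiers with backticks), a column
+// definition renders roughly as:
+//
+//	query, args := Column("age").Type("int").Attr("NOT NULL").Query()
+//	// query: `age` int NOT NULL
+//	// args:  empty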
+func (c *ColumnBuilder) Query() (string, []any) { + c.Ident(c.name) + if c.typ != "" { + if c.postgres() && c.modify { + c.WriteString(" TYPE") + } + c.Pad().WriteString(c.typ) + } + if c.attr != "" { + c.Pad().WriteString(c.attr) + } + if c.fk != nil { + c.WriteString(" CONSTRAINT " + c.fk.symbol) + c.Pad().Join(c.fk.ref) + for _, action := range c.fk.actions { + c.Pad().WriteString(action) + } + } + if c.check != nil { + c.WriteString(" CHECK ") + c.Wrap(c.check) + } + return c.String(), c.args +} + +// TableBuilder is a query builder for `CREATE TABLE` statement. +type TableBuilder struct { + Builder + name string // table name. + exists bool // check existence. + charset string // table charset. + collation string // table collation. + options string // table options. + columns []Querier // table columns. + primary []string // primary key. + constraints []Querier // foreign keys and indices. + checks []func(*Builder) // check constraints. +} + +// CreateTable returns a query builder for the `CREATE TABLE` statement. +// +// CreateTable("users"). +// Columns( +// Column("id").Type("int").Attr("auto_increment"), +// Column("name").Type("varchar(255)"), +// ). +// PrimaryKey("id") +func CreateTable(name string) *TableBuilder { return &TableBuilder{name: name} } + +// IfNotExists appends the `IF NOT EXISTS` clause to the `CREATE TABLE` statement. +func (t *TableBuilder) IfNotExists() *TableBuilder { + t.exists = true + return t +} + +// Column appends the given column to the `CREATE TABLE` statement. +func (t *TableBuilder) Column(c *ColumnBuilder) *TableBuilder { + t.columns = append(t.columns, c) + return t +} + +// Columns appends a list of columns to the builder. +func (t *TableBuilder) Columns(columns ...*ColumnBuilder) *TableBuilder { + t.columns = make([]Querier, 0, len(columns)) + for i := range columns { + t.columns = append(t.columns, columns[i]) + } + return t +} + +// PrimaryKey adds a column to the primary-key constraint in the statement. +func (t *TableBuilder) PrimaryKey(column ...string) *TableBuilder { + t.primary = append(t.primary, column...) + return t +} + +// ForeignKeys adds a list of foreign-keys to the statement (without constraints). +func (t *TableBuilder) ForeignKeys(fks ...*ForeignKeyBuilder) *TableBuilder { + queries := make([]Querier, len(fks)) + for i := range fks { + // Erase the constraint symbol/name. + fks[i].symbol = "" + queries[i] = fks[i] + } + t.constraints = append(t.constraints, queries...) + return t +} + +// Constraints adds a list of foreign-key constraints to the statement. +func (t *TableBuilder) Constraints(fks ...*ForeignKeyBuilder) *TableBuilder { + queries := make([]Querier, len(fks)) + for i := range fks { + queries[i] = &Wrapper{"CONSTRAINT %s", fks[i]} + } + t.constraints = append(t.constraints, queries...) + return t +} + +// Checks adds CHECK clauses to the CREATE TABLE statement. +func (t *TableBuilder) Checks(checks ...func(*Builder)) *TableBuilder { + t.checks = append(t.checks, checks...) + return t +} + +// Charset appends the `CHARACTER SET` clause to the statement. MySQL only. +func (t *TableBuilder) Charset(s string) *TableBuilder { + t.charset = s + return t +} + +// Collate appends the `COLLATE` clause to the statement. MySQL only. +func (t *TableBuilder) Collate(s string) *TableBuilder { + t.collation = s + return t +} + +// Options appends additional options to the statement (MySQL only). 
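+//
+// For example, an illustrative sketch:
+//
+//	CreateTable("users").
+//		Columns(Column("id").Type("int")).
+//		Options("ENGINE = InnoDB")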
+func (t *TableBuilder) Options(s string) *TableBuilder { + t.options = s + return t +} + +// Query returns query representation of a `CREATE TABLE` statement. +// +// CREATE TABLE [IF NOT EXISTS] name +// +// (table definition) +// [charset and collation] +func (t *TableBuilder) Query() (string, []any) { + t.WriteString("CREATE TABLE ") + if t.exists { + t.WriteString("IF NOT EXISTS ") + } + t.Ident(t.name) + t.Wrap(func(b *Builder) { + b.JoinComma(t.columns...) + if len(t.primary) > 0 { + b.Comma().WriteString("PRIMARY KEY") + b.Wrap(func(b *Builder) { + b.IdentComma(t.primary...) + }) + } + if len(t.constraints) > 0 { + b.Comma().JoinComma(t.constraints...) + } + for _, check := range t.checks { + check(b.Comma()) + } + }) + if t.charset != "" { + t.WriteString(" CHARACTER SET " + t.charset) + } + if t.collation != "" { + t.WriteString(" COLLATE " + t.collation) + } + if t.options != "" { + t.WriteString(" " + t.options) + } + return t.String(), t.args +} + +// DescribeBuilder is a query builder for `DESCRIBE` statement. +type DescribeBuilder struct { + Builder + name string // table name. +} + +// Describe returns a query builder for the `DESCRIBE` statement. +// +// Describe("users") +func Describe(name string) *DescribeBuilder { return &DescribeBuilder{name: name} } + +// Query returns query representation of a `DESCRIBE` statement. +func (t *DescribeBuilder) Query() (string, []any) { + t.WriteString("DESCRIBE ") + t.Ident(t.name) + return t.String(), nil +} + +// TableAlter is a query builder for `ALTER TABLE` statement. +type TableAlter struct { + Builder + name string // table to alter. + Queries []Querier // columns and foreign-keys to add. +} + +// AlterTable returns a query builder for the `ALTER TABLE` statement. +// +// AlterTable("users"). +// AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). +// AddForeignKey(ForeignKey().Columns("group_id"). +// Reference(Reference().Table("groups").Columns("id")).OnDelete("CASCADE")), +// ) +func AlterTable(name string) *TableAlter { return &TableAlter{name: name} } + +// AddColumn appends the `ADD COLUMN` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) AddColumn(c *ColumnBuilder) *TableAlter { + t.Queries = append(t.Queries, &Wrapper{"ADD COLUMN %s", c}) + return t +} + +// ModifyColumn appends the `MODIFY/ALTER COLUMN` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) ModifyColumn(c *ColumnBuilder) *TableAlter { + switch { + case t.postgres(): + c.modify = true + t.Queries = append(t.Queries, &Wrapper{"ALTER COLUMN %s", c}) + default: + t.Queries = append(t.Queries, &Wrapper{"MODIFY COLUMN %s", c}) + } + return t +} + +// RenameColumn appends the `RENAME COLUMN` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) RenameColumn(old, new string) *TableAlter { + t.Queries = append(t.Queries, Raw(fmt.Sprintf("RENAME COLUMN %s TO %s", t.Quote(old), t.Quote(new)))) + return t +} + +// ModifyColumns calls ModifyColumn with each of the given builders. +func (t *TableAlter) ModifyColumns(cs ...*ColumnBuilder) *TableAlter { + for _, c := range cs { + t.ModifyColumn(c) + } + return t +} + +// DropColumn appends the `DROP COLUMN` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) DropColumn(c *ColumnBuilder) *TableAlter { + t.Queries = append(t.Queries, &Wrapper{"DROP COLUMN %s", c}) + return t +} + +// ChangeColumn appends the `CHANGE COLUMN` clause to the given `ALTER TABLE` statement. 
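+//
+// For example, an illustrative sketch (MySQL syntax):
+//
+//	AlterTable("users").
+//		ChangeColumn("old_name", Column("new_name").Type("varchar(255)"))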
+func (t *TableAlter) ChangeColumn(name string, c *ColumnBuilder) *TableAlter { + prefix := fmt.Sprintf("CHANGE COLUMN %s", t.Quote(name)) + t.Queries = append(t.Queries, &Wrapper{prefix + " %s", c}) + return t +} + +// RenameIndex appends the `RENAME INDEX` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) RenameIndex(curr, new string) *TableAlter { + t.Queries = append(t.Queries, Raw(fmt.Sprintf("RENAME INDEX %s TO %s", t.Quote(curr), t.Quote(new)))) + return t +} + +// DropIndex appends the `DROP INDEX` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) DropIndex(name string) *TableAlter { + t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP INDEX %s", t.Quote(name)))) + return t +} + +// AddIndex appends the `ADD INDEX` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) AddIndex(idx *IndexBuilder) *TableAlter { + b := &Builder{dialect: t.dialect} + b.WriteString("ADD ") + if idx.unique { + b.WriteString("UNIQUE ") + } + b.WriteString("INDEX ") + b.Ident(idx.name) + b.Wrap(func(b *Builder) { + b.IdentComma(idx.columns...) + }) + t.Queries = append(t.Queries, b) + return t +} + +// AddForeignKey adds a foreign key constraint to the `ALTER TABLE` statement. +func (t *TableAlter) AddForeignKey(fk *ForeignKeyBuilder) *TableAlter { + t.Queries = append(t.Queries, &Wrapper{"ADD CONSTRAINT %s", fk}) + return t +} + +// DropConstraint appends the `DROP CONSTRAINT` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) DropConstraint(ident string) *TableAlter { + t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP CONSTRAINT %s", t.Quote(ident)))) + return t +} + +// DropForeignKey appends the `DROP FOREIGN KEY` clause to the given `ALTER TABLE` statement. +func (t *TableAlter) DropForeignKey(ident string) *TableAlter { + t.Queries = append(t.Queries, Raw(fmt.Sprintf("DROP FOREIGN KEY %s", t.Quote(ident)))) + return t +} + +// Query returns query representation of the `ALTER TABLE` statement. +// +// ALTER TABLE name +// [alter_specification] +func (t *TableAlter) Query() (string, []any) { + t.WriteString("ALTER TABLE ") + t.Ident(t.name) + t.Pad() + t.JoinComma(t.Queries...) + return t.String(), t.args +} + +// IndexAlter is a query builder for `ALTER INDEX` statement. +type IndexAlter struct { + Builder + name string // index to alter. + Queries []Querier // alter options. +} + +// AlterIndex returns a query builder for the `ALTER INDEX` statement. +// +// AlterIndex("old_key"). +// Rename("new_key") +func AlterIndex(name string) *IndexAlter { return &IndexAlter{name: name} } + +// Rename appends the `RENAME TO` clause to the `ALTER INDEX` statement. +func (i *IndexAlter) Rename(name string) *IndexAlter { + i.Queries = append(i.Queries, Raw(fmt.Sprintf("RENAME TO %s", i.Quote(name)))) + return i +} + +// Query returns query representation of the `ALTER INDEX` statement. +// +// ALTER INDEX name +// [alter_specification] +func (i *IndexAlter) Query() (string, []any) { + i.WriteString("ALTER INDEX ") + i.Ident(i.name) + i.Pad() + i.JoinComma(i.Queries...) + return i.String(), i.args +} + +// ForeignKeyBuilder is the builder for the foreign-key constraint clause. +type ForeignKeyBuilder struct { + Builder + symbol string + columns []string + actions []string + ref *ReferenceBuilder +} + +// ForeignKey returns a builder for the foreign-key constraint clause in create/alter table statements. +// +// ForeignKey(). +// Columns("group_id"). +// Reference(Reference().Table("groups").Columns("id")). 
+// OnDelete("CASCADE") +func ForeignKey(symbol ...string) *ForeignKeyBuilder { + fk := &ForeignKeyBuilder{} + if len(symbol) != 0 { + fk.symbol = symbol[0] + } + return fk +} + +// Symbol sets the symbol of the foreign key. +func (fk *ForeignKeyBuilder) Symbol(s string) *ForeignKeyBuilder { + fk.symbol = s + return fk +} + +// Columns sets the columns of the foreign key in the source table. +func (fk *ForeignKeyBuilder) Columns(s ...string) *ForeignKeyBuilder { + fk.columns = append(fk.columns, s...) + return fk +} + +// Reference sets the reference clause. +func (fk *ForeignKeyBuilder) Reference(r *ReferenceBuilder) *ForeignKeyBuilder { + fk.ref = r + return fk +} + +// OnDelete sets the on delete action for this constraint. +func (fk *ForeignKeyBuilder) OnDelete(action string) *ForeignKeyBuilder { + fk.actions = append(fk.actions, "ON DELETE "+action) + return fk +} + +// OnUpdate sets the on delete action for this constraint. +func (fk *ForeignKeyBuilder) OnUpdate(action string) *ForeignKeyBuilder { + fk.actions = append(fk.actions, "ON UPDATE "+action) + return fk +} + +// Query returns query representation of a foreign key constraint. +func (fk *ForeignKeyBuilder) Query() (string, []any) { + if fk.symbol != "" { + fk.Ident(fk.symbol).Pad() + } + fk.WriteString("FOREIGN KEY") + fk.Wrap(func(b *Builder) { + b.IdentComma(fk.columns...) + }) + fk.Pad().Join(fk.ref) + for _, action := range fk.actions { + fk.Pad().WriteString(action) + } + return fk.String(), fk.args +} + +// ReferenceBuilder is a builder for the reference clause in constraints. For example, in foreign key creation. +type ReferenceBuilder struct { + Builder + table string // referenced table. + columns []string // referenced columns. +} + +// Reference creates a reference builder for the reference_option clause. +// +// Reference().Table("groups").Columns("id") +func Reference() *ReferenceBuilder { return &ReferenceBuilder{} } + +// Table sets the referenced table. +func (r *ReferenceBuilder) Table(s string) *ReferenceBuilder { + r.table = s + return r +} + +// Columns sets the columns of the referenced table. +func (r *ReferenceBuilder) Columns(s ...string) *ReferenceBuilder { + r.columns = append(r.columns, s...) + return r +} + +// Query returns query representation of a reference clause. +func (r *ReferenceBuilder) Query() (string, []any) { + r.WriteString("REFERENCES ") + r.Ident(r.table) + r.Wrap(func(b *Builder) { + b.IdentComma(r.columns...) + }) + return r.String(), r.args +} + +// IndexBuilder is a builder for `CREATE INDEX` statement. +type IndexBuilder struct { + Builder + name string + unique bool + exists bool + table string + method string + columns []string +} + +// CreateIndex creates a builder for the `CREATE INDEX` statement. +// +// CreateIndex("index_name"). +// Unique(). +// Table("users"). +// Column("name") +// +// Or: +// +// CreateIndex("index_name"). +// Unique(). +// Table("users"). +// Columns("name", "age") +func CreateIndex(name string) *IndexBuilder { + return &IndexBuilder{name: name} +} + +// IfNotExists appends the `IF NOT EXISTS` clause to the `CREATE INDEX` statement. +func (i *IndexBuilder) IfNotExists() *IndexBuilder { + i.exists = true + return i +} + +// Unique sets the index to be a unique index. +func (i *IndexBuilder) Unique() *IndexBuilder { + i.unique = true + return i +} + +// Table defines the table for the index. +func (i *IndexBuilder) Table(table string) *IndexBuilder { + i.table = table + return i +} + +// Using sets the method to create the index with. 
+func (i *IndexBuilder) Using(method string) *IndexBuilder {
+	i.method = method
+	return i
+}
+
+// Column appends a column to the column list for the index.
+func (i *IndexBuilder) Column(column string) *IndexBuilder {
+	i.columns = append(i.columns, column)
+	return i
+}
+
+// Columns appends the given columns to the column list for the index.
+func (i *IndexBuilder) Columns(columns ...string) *IndexBuilder {
+	i.columns = append(i.columns, columns...)
+	return i
+}
+
+// Query returns query representation of a `CREATE INDEX` statement.
+func (i *IndexBuilder) Query() (string, []any) {
+	i.WriteString("CREATE ")
+	if i.unique {
+		i.WriteString("UNIQUE ")
+	}
+	i.WriteString("INDEX ")
+	if i.exists {
+		i.WriteString("IF NOT EXISTS ")
+	}
+	i.Ident(i.name)
+	i.WriteString(" ON ")
+	i.Ident(i.table)
+	switch i.dialect {
+	case dialect.Postgres:
+		if i.method != "" {
+			i.WriteString(" USING ").Ident(i.method)
+		}
+		i.Wrap(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+	case dialect.MySQL:
+		i.Wrap(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+		if i.method != "" {
+			i.WriteString(" USING " + i.method)
+		}
+	default:
+		i.Wrap(func(b *Builder) {
+			b.IdentComma(i.columns...)
+		})
+	}
+	return i.String(), nil
+}
+
+// DropIndexBuilder is a builder for the `DROP INDEX` statement.
+type DropIndexBuilder struct {
+	Builder
+	name  string
+	table string
+}
+
+// DropIndex creates a builder for the `DROP INDEX` statement.
+//
+// MySQL:
+//
+//	DropIndex("index_name").
+//		Table("users").
+//
+// SQLite/PostgreSQL:
+//
+//	DropIndex("index_name")
+func DropIndex(name string) *DropIndexBuilder {
+	return &DropIndexBuilder{name: name}
+}
+
+// Table defines the table for the index.
+func (d *DropIndexBuilder) Table(table string) *DropIndexBuilder {
+	d.table = table
+	return d
+}
+
+// Query returns query representation of a `DROP INDEX` statement.
+//
+//	DROP INDEX index_name [ON table_name]
+func (d *DropIndexBuilder) Query() (string, []any) {
+	d.WriteString("DROP INDEX ")
+	d.Ident(d.name)
+	if d.table != "" {
+		d.WriteString(" ON ")
+		d.Ident(d.table)
+	}
+	return d.String(), nil
+}
+
+// InsertBuilder is a builder for the `INSERT INTO` statement.
+type InsertBuilder struct {
+	Builder
+	table     string
+	schema    string
+	columns   []string
+	defaults  bool
+	returning []string
+	values    [][]any
+	conflict  *conflict
+}
+
+// Insert creates a builder for the `INSERT INTO` statement.
+//
+//	Insert("users").
+//		Columns("name", "age").
+//		Values("a8m", 10).
+//		Values("foo", 20)
+//
+// Note: Insert inserts all values in one batch.
+func Insert(table string) *InsertBuilder { return &InsertBuilder{table: table} }
+
+// Schema sets the database name for the insert table.
+func (i *InsertBuilder) Schema(name string) *InsertBuilder {
+	i.schema = name
+	return i
+}
+
+// Set is a syntactic sugar API for inserting only one row.
+func (i *InsertBuilder) Set(column string, v any) *InsertBuilder {
+	i.columns = append(i.columns, column)
+	if len(i.values) == 0 {
+		i.values = append(i.values, []any{v})
+	} else {
+		i.values[0] = append(i.values[0], v)
+	}
+	return i
+}
+
+// Columns appends columns to the INSERT statement.
+func (i *InsertBuilder) Columns(columns ...string) *InsertBuilder {
+	i.columns = append(i.columns, columns...)
+	return i
+}
+
+// Values appends a value tuple to the insert statement.
+func (i *InsertBuilder) Values(values ...any) *InsertBuilder {
+	i.values = append(i.values, values)
+	return i
+}
+
+// Default sets the default values clause based on the dialect type.
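+//
+// An illustrative sketch (not upstream documentation) of the per-dialect output:
+//
+//	Insert("users").Default()
+//
+//	// MySQL:      INSERT INTO `users` VALUES ()
+//	// PostgreSQL: INSERT INTO "users" DEFAULT VALUES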
+func (i *InsertBuilder) Default() *InsertBuilder {
+	i.defaults = true
+	return i
+}
+
+// Returning adds the `RETURNING` clause to the insert statement.
+// Supported by SQLite and PostgreSQL.
+func (i *InsertBuilder) Returning(columns ...string) *InsertBuilder {
+	i.returning = columns
+	return i
+}
+
+type (
+	// conflict holds the configuration for the
+	// `ON CONFLICT` / `ON DUPLICATE KEY` clause.
+	conflict struct {
+		target struct {
+			constraint string
+			columns    []string
+			where      *Predicate
+		}
+		action struct {
+			nothing bool
+			where   *Predicate
+			update  []func(*UpdateSet)
+		}
+	}
+
+	// ConflictOption allows configuring the
+	// conflict config using functional options.
+	ConflictOption func(*conflict)
+)
+
+// ConflictColumns sets the unique constraints that trigger the conflict
+// resolution on insert to perform an upsert operation. The columns must
+// have a unique constraint applied to trigger this behaviour.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues(),
+//		)
+func ConflictColumns(names ...string) ConflictOption {
+	return func(c *conflict) {
+		c.target.columns = names
+	}
+}
+
+// ConflictConstraint allows setting the constraint
+// name (i.e. the `ON CONSTRAINT` clause) for PostgreSQL.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictConstraint("users_pkey"),
+//			sql.ResolveWithNewValues(),
+//		)
+func ConflictConstraint(name string) ConflictOption {
+	return func(c *conflict) {
+		c.target.constraint = name
+	}
+}
+
+// ConflictWhere allows inference of partial unique indexes. See the PostgreSQL
+// docs: https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT
+func ConflictWhere(p *Predicate) ConflictOption {
+	return func(c *conflict) {
+		c.target.where = p
+	}
+}
+
+// UpdateWhere allows setting the update condition. Only rows
+// for which this expression returns true will be updated.
+func UpdateWhere(p *Predicate) ConflictOption {
+	return func(c *conflict) {
+		c.action.where = p
+	}
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported by SQLite and PostgreSQL.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.DoNothing()
+//		)
+func DoNothing() ConflictOption {
+	return func(c *conflict) {
+		c.action.nothing = true
+	}
+}
+
+// ResolveWithIgnore sets each column to itself to force an update and return the ID,
+// otherwise it does not change any data. This may still trigger update hooks in the database.
+//
+//	sql.Insert("users").
+//		Columns("id").
+//		Values(1).
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithIgnore()
+//		)
+//
+//	// Output:
+//	// MySQL: INSERT INTO `users` (`id`) VALUES(1) ON DUPLICATE KEY UPDATE `id` = `users`.`id`
+//	// PostgreSQL: INSERT INTO "users" ("id") VALUES(1) ON CONFLICT ("id") DO UPDATE SET "id" = "users"."id"
+func ResolveWithIgnore() ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, func(u *UpdateSet) {
+			for _, c := range u.columns {
+				u.SetIgnore(c)
+			}
+		})
+	}
+}
+
+// ResolveWithNewValues updates columns using the new values proposed
+// for insertion using the special EXCLUDED/VALUES table.
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues()
+//		)
+//
+//	// Output:
+//	// MySQL: INSERT INTO `users` (`id`, `name`) VALUES(1, 'Mashraki') ON DUPLICATE KEY UPDATE `id` = VALUES(`id`), `name` = VALUES(`name`)
+//	// PostgreSQL: INSERT INTO "users" ("id", "name") VALUES(1, 'Mashraki') ON CONFLICT ("id") DO UPDATE SET "id" = "excluded"."id", "name" = "excluded"."name"
+func ResolveWithNewValues() ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, func(u *UpdateSet) {
+			for _, c := range u.columns {
+				u.SetExcluded(c)
+			}
+		})
+	}
+}
+
+// ResolveWith allows setting a custom function to set the `UPDATE` clause.
+//
+//	Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			ConflictColumns("name"),
+//			ResolveWith(func(u *UpdateSet) {
+//				u.SetIgnore("id")
+//				u.SetNull("created_at")
+//				u.Set("name", Expr(u.Excluded().C("name")))
+//			}),
+//		)
+func ResolveWith(fn func(*UpdateSet)) ConflictOption {
+	return func(c *conflict) {
+		c.action.update = append(c.action.update, fn)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	sql.Insert("users").
+//		Columns("id", "name").
+//		Values(1, "Mashraki").
+//		OnConflict(
+//			sql.ConflictColumns("id"),
+//			sql.ResolveWithNewValues()
+//		)
+func (i *InsertBuilder) OnConflict(opts ...ConflictOption) *InsertBuilder {
+	if i.conflict == nil {
+		i.conflict = &conflict{}
+	}
+	for _, opt := range opts {
+		opt(i.conflict)
+	}
+	return i
+}
+
+// UpdateSet describes a set of changes of the `DO UPDATE` clause.
+type UpdateSet struct {
+	columns []string
+	update  *UpdateBuilder
+}
+
+// Table returns the table the `UPSERT` statement is executed on.
+func (u *UpdateSet) Table() *SelectTable {
+	return Dialect(u.update.dialect).Table(u.update.table)
+}
+
+// Columns returns all columns in the `INSERT` statement.
+func (u *UpdateSet) Columns() []string {
+	return u.columns
+}
+
+// UpdateColumns returns all columns in the `UPDATE` statement.
+func (u *UpdateSet) UpdateColumns() []string {
+	return append(u.update.nulls, u.update.columns...)
+}
+
+// Set sets a column to a given value.
+func (u *UpdateSet) Set(column string, v any) *UpdateSet {
+	u.update.Set(column, v)
+	return u
+}
+
+// Add adds a numeric value to the given column.
+func (u *UpdateSet) Add(column string, v any) *UpdateSet {
+	u.update.Add(column, v)
+	return u
+}
+
+// SetNull sets a column as null value.
+func (u *UpdateSet) SetNull(column string) *UpdateSet {
+	u.update.SetNull(column)
+	return u
+}
+
+// SetIgnore sets the column to itself. For example, "id" = "users"."id".
+func (u *UpdateSet) SetIgnore(name string) *UpdateSet {
+	return u.Set(name, Expr(u.Table().C(name)))
+}
+
+// SetExcluded sets the column name to its EXCLUDED/VALUES value.
+// For example, "c" = "excluded"."c", or `c` = VALUES(`c`).
+func (u *UpdateSet) SetExcluded(name string) *UpdateSet {
+	switch u.update.Dialect() {
+	case dialect.MySQL:
+		u.update.Set(name, ExprFunc(func(b *Builder) {
+			b.WriteString("VALUES(").Ident(name).WriteByte(')')
+		}))
+	default:
+		t := Dialect(u.update.dialect).Table("excluded")
+		u.update.Set(name, Expr(t.C(name)))
+	}
+	return u
+}
+
+// Query returns query representation of an `INSERT INTO` statement.
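+//
+// For example, an illustrative sketch (default dialect, "?" placeholders):
+//
+//	query, args := Insert("users").Columns("name", "age").Values("a8m", 10).Query()
+//	// query: INSERT INTO `users` (`name`, `age`) VALUES (?, ?)
+//	// args:  ["a8m", 10]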
+func (i *InsertBuilder) Query() (string, []any) { + query, args, _ := i.QueryErr() + return query, args +} + +// QueryErr returns query representation of an `INSERT INTO` +// statement and any error occurred in building the statement. +func (i *InsertBuilder) QueryErr() (string, []any, error) { + b := i.Builder.clone() + b.WriteString("INSERT INTO ") + b.writeSchema(i.schema) + b.Ident(i.table).Pad() + if i.defaults && len(i.columns) == 0 { + i.writeDefault(&b) + } else { + b.WriteByte('(').IdentComma(i.columns...).WriteByte(')') + b.WriteString(" VALUES ") + for j, v := range i.values { + if j > 0 { + b.Comma() + } + b.WriteByte('(').Args(v...).WriteByte(')') + } + } + if i.conflict != nil { + i.writeConflict(&b) + } + joinReturning(i.returning, &b) + return b.String(), b.args, b.Err() +} + +func (i *InsertBuilder) writeDefault(b *Builder) { + switch i.Dialect() { + case dialect.MySQL: + b.WriteString("VALUES ()") + case dialect.SQLite, dialect.Postgres: + b.WriteString("DEFAULT VALUES") + } +} + +func (i *InsertBuilder) writeConflict(b *Builder) { + switch i.Dialect() { + case dialect.MySQL: + b.WriteString(" ON DUPLICATE KEY UPDATE ") + // Fallback to ResolveWithIgnore() as MySQL + // does not support the "DO NOTHING" clause. + if i.conflict.action.nothing { + i.OnConflict(ResolveWithIgnore()) + } + case dialect.SQLite, dialect.Postgres: + b.WriteString(" ON CONFLICT") + switch t := i.conflict.target; { + case t.constraint != "" && len(t.columns) != 0: + b.AddError(fmt.Errorf("duplicate CONFLICT clauses: %q, %q", t.constraint, t.columns)) + case t.constraint != "": + b.WriteString(" ON CONSTRAINT ").Ident(t.constraint) + case len(t.columns) != 0: + b.WriteString(" (").IdentComma(t.columns...).WriteByte(')') + } + if p := i.conflict.target.where; p != nil { + b.WriteString(" WHERE ").Join(p) + } + if i.conflict.action.nothing { + b.WriteString(" DO NOTHING") + return + } + b.WriteString(" DO UPDATE SET ") + } + if len(i.conflict.action.update) == 0 { + b.AddError(errors.New("missing action for 'DO UPDATE SET' clause")) + } + u := &UpdateSet{columns: i.columns, update: Dialect(i.dialect).Update(i.table)} + u.update.Builder = *b + for _, f := range i.conflict.action.update { + f(u) + } + u.update.writeSetter(b) + if p := i.conflict.action.where; p != nil { + p.qualifier = i.table + b.WriteString(" WHERE ").Join(p) + } +} + +// UpdateBuilder is a builder for `UPDATE` statement. +type UpdateBuilder struct { + Builder + table string + schema string + where *Predicate + nulls []string + columns []string + returning []string + values []any + order []any + limit *int + prefix Queries +} + +// Update creates a builder for the `UPDATE` statement. +// +// Update("users").Set("name", "foo").Set("age", 10) +func Update(table string) *UpdateBuilder { return &UpdateBuilder{table: table} } + +// Schema sets the database name for the updated table. +func (u *UpdateBuilder) Schema(name string) *UpdateBuilder { + u.schema = name + return u +} + +// Set sets a column to a given value. If `Set` was called before with +// the same column name, it overrides the value of the previous call. +func (u *UpdateBuilder) Set(column string, v any) *UpdateBuilder { + for i := range u.columns { + if column == u.columns[i] { + u.values[i] = v + return u + } + } + u.columns = append(u.columns, column) + u.values = append(u.values, v) + return u +} + +// Add adds a numeric value to the given column. Note that, calling Set(c) +// after Add(c) will erase previous calls with c from the builder. 
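+//
+// An illustrative sketch (default dialect):
+//
+//	query, args := Update("users").Add("rank", 10).Query()
+//	// query (roughly): UPDATE `users` SET `rank` = COALESCE(`users`.`rank`, 0) + ?
+//	// args:            [10]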
+func (u *UpdateBuilder) Add(column string, v any) *UpdateBuilder { + u.columns = append(u.columns, column) + u.values = append(u.values, ExprFunc(func(b *Builder) { + b.WriteString("COALESCE") + b.Wrap(func(b *Builder) { + b.Ident(Table(u.table).C(column)).Comma().WriteByte('0') + }) + b.WriteString(" + ") + b.Arg(v) + })) + return u +} + +// SetNull sets a column as null value. +func (u *UpdateBuilder) SetNull(column string) *UpdateBuilder { + u.nulls = append(u.nulls, column) + return u +} + +// Where adds a where predicate for update statement. +func (u *UpdateBuilder) Where(p *Predicate) *UpdateBuilder { + if u.where != nil { + u.where = And(u.where, p) + } else { + u.where = p + } + return u +} + +// FromSelect makes it possible to update entities that match the sub-query. +func (u *UpdateBuilder) FromSelect(s *Selector) *UpdateBuilder { + u.Where(s.where) + if t := s.Table(); t != nil { + u.table = t.name + } + return u +} + +// Empty reports whether this builder does not contain update changes. +func (u *UpdateBuilder) Empty() bool { + return len(u.columns) == 0 && len(u.nulls) == 0 +} + +// OrderBy appends the `ORDER BY` clause to the `UPDATE` statement. +// Supported by SQLite and MySQL. +func (u *UpdateBuilder) OrderBy(columns ...string) *UpdateBuilder { + if u.postgres() { + u.AddError(errors.New("ORDER BY is not supported by PostgreSQL")) + return u + } + for i := range columns { + u.order = append(u.order, columns[i]) + } + return u +} + +// Limit appends the `LIMIT` clause to the `UPDATE` statement. +// Supported by SQLite and MySQL. +func (u *UpdateBuilder) Limit(limit int) *UpdateBuilder { + if u.postgres() { + u.AddError(errors.New("LIMIT is not supported by PostgreSQL")) + return u + } + u.limit = &limit + return u +} + +// Prefix prefixes the UPDATE statement with list of statements. +func (u *UpdateBuilder) Prefix(stmts ...Querier) *UpdateBuilder { + u.prefix = append(u.prefix, stmts...) + return u +} + +// Returning adds the `RETURNING` clause to the insert statement. +// Supported by SQLite and PostgreSQL. +func (u *UpdateBuilder) Returning(columns ...string) *UpdateBuilder { + u.returning = columns + return u +} + +// Query returns query representation of an `UPDATE` statement. +func (u *UpdateBuilder) Query() (string, []any) { + b := u.Builder.clone() + if len(u.prefix) > 0 { + b.join(u.prefix, " ") + b.Pad() + } + b.WriteString("UPDATE ") + b.writeSchema(u.schema) + b.Ident(u.table).WriteString(" SET ") + u.writeSetter(&b) + if u.where != nil { + b.WriteString(" WHERE ") + b.Join(u.where) + } + joinReturning(u.returning, &b) + joinOrder(u.order, &b) + if u.limit != nil { + b.WriteString(" LIMIT ") + b.WriteString(strconv.Itoa(*u.limit)) + } + return b.String(), b.args +} + +// writeSetter writes the "SET" clause for the UPDATE statement. +func (u *UpdateBuilder) writeSetter(b *Builder) { + for i, c := range u.nulls { + if i > 0 { + b.Comma() + } + b.Ident(c).WriteString(" = NULL") + } + if len(u.nulls) > 0 && len(u.columns) > 0 { + b.Comma() + } + for i, c := range u.columns { + if i > 0 { + b.Comma() + } + b.Ident(c).WriteString(" = ") + switch v := u.values[i].(type) { + case Querier: + b.Join(v) + default: + b.Arg(v) + } + } +} + +// DeleteBuilder is a builder for `DELETE` statement. +type DeleteBuilder struct { + Builder + table string + schema string + where *Predicate +} + +// Delete creates a builder for the `DELETE` statement. +// +// Delete("users"). 
+// Where( +// Or( +// EQ("name", "foo").And().EQ("age", 10), +// EQ("name", "bar").And().EQ("age", 20), +// And( +// EQ("name", "qux"), +// EQ("age", 1).Or().EQ("age", 2), +// ), +// ), +// ) +func Delete(table string) *DeleteBuilder { return &DeleteBuilder{table: table} } + +// Schema sets the database name for the table whose row will be deleted. +func (d *DeleteBuilder) Schema(name string) *DeleteBuilder { + d.schema = name + return d +} + +// Where appends a where predicate to the `DELETE` statement. +func (d *DeleteBuilder) Where(p *Predicate) *DeleteBuilder { + if d.where != nil { + d.where = And(d.where, p) + } else { + d.where = p + } + return d +} + +// FromSelect makes it possible to delete a sub query. +func (d *DeleteBuilder) FromSelect(s *Selector) *DeleteBuilder { + d.Where(s.where) + if t := s.Table(); t != nil { + d.table = t.name + } + return d +} + +// Query returns query representation of a `DELETE` statement. +func (d *DeleteBuilder) Query() (string, []any) { + d.WriteString("DELETE FROM ") + d.writeSchema(d.schema) + d.Ident(d.table) + if d.where != nil { + d.WriteString(" WHERE ") + d.Join(d.where) + } + return d.String(), d.args +} + +// Predicate is a where predicate. +type Predicate struct { + Builder + depth int + fns []func(*Builder) +} + +// P creates a new predicate. +// +// P().EQ("name", "a8m").And().EQ("age", 30) +func P(fns ...func(*Builder)) *Predicate { + return &Predicate{fns: fns} +} + +// ExprP creates a new predicate from the given expression. +// +// ExprP("A = ? AND B > ?", args...) +func ExprP(exr string, args ...any) *Predicate { + return P(func(b *Builder) { + b.Join(Expr(exr, args...)) + }) +} + +// Or combines all given predicates with OR between them. +// +// Or(EQ("name", "foo"), EQ("name", "bar")) +func Or(preds ...*Predicate) *Predicate { + p := P() + return p.Append(func(b *Builder) { + p.mayWrap(preds, b, "OR") + }) +} + +// False appends the FALSE keyword to the predicate. +// +// Delete().From("users").Where(False()) +func False() *Predicate { + return P().False() +} + +// False appends FALSE to the predicate. +func (p *Predicate) False() *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("FALSE") + }) +} + +// Not wraps the given predicate with the not predicate. +// +// Not(Or(EQ("name", "foo"), EQ("name", "bar"))) +func Not(pred *Predicate) *Predicate { + return P().Not().Append(func(b *Builder) { + b.Wrap(func(b *Builder) { + b.Join(pred) + }) + }) +} + +// Not appends NOT to the predicate. +func (p *Predicate) Not() *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("NOT ") + }) +} + +// ColumnsOp returns a new predicate between 2 columns. +func ColumnsOp(col1, col2 string, op Op) *Predicate { + return P().ColumnsOp(col1, col2, op) +} + +// ColumnsOp appends the given predicate between 2 columns. +func (p *Predicate) ColumnsOp(col1, col2 string, op Op) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col1) + b.WriteOp(op) + b.Ident(col2) + }) +} + +// And combines all given predicates with AND between them. +func And(preds ...*Predicate) *Predicate { + p := P() + return p.Append(func(b *Builder) { + p.mayWrap(preds, b, "AND") + }) +} + +// IsTrue appends a predicate that checks if the column value is truthy. +func IsTrue(col string) *Predicate { + return P().IsTrue(col) +} + +// IsTrue appends a predicate that checks if the column value is truthy. 
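+//
+// Note (illustrative): EQ with a boolean constant lowers to IsTrue/IsFalse,
+// so both forms below render the bare column reference:
+//
+//	IsTrue("active")   // `active`
+//	EQ("active", true) // `active`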
+func (p *Predicate) IsTrue(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + }) +} + +// IsFalse appends a predicate that checks if the column value is falsey. +func IsFalse(col string) *Predicate { + return P().IsFalse(col) +} + +// IsFalse appends a predicate that checks if the column value is falsey. +func (p *Predicate) IsFalse(col string) *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("NOT ").Ident(col) + }) +} + +// EQ returns a "=" predicate. +func EQ(col string, value any) *Predicate { + return P().EQ(col, value) +} + +// EQ appends a "=" predicate. +func (p *Predicate) EQ(col string, arg any) *Predicate { + // A small optimization to avoid passing + // arguments when it can be avoided. + switch arg := arg.(type) { + case bool: + if arg { + return IsTrue(col) + } + return IsFalse(col) + default: + return p.Append(func(b *Builder) { + b.Ident(col) + b.WriteOp(OpEQ) + p.arg(b, arg) + }) + } +} + +// ColumnsEQ appends a "=" predicate between 2 columns. +func ColumnsEQ(col1, col2 string) *Predicate { + return P().ColumnsEQ(col1, col2) +} + +// ColumnsEQ appends a "=" predicate between 2 columns. +func (p *Predicate) ColumnsEQ(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpEQ) +} + +// NEQ returns a "<>" predicate. +func NEQ(col string, value any) *Predicate { + return P().NEQ(col, value) +} + +// NEQ appends a "<>" predicate. +func (p *Predicate) NEQ(col string, arg any) *Predicate { + // A small optimization to avoid passing + // arguments when it can be avoided. + switch arg := arg.(type) { + case bool: + if arg { + return IsFalse(col) + } + return IsTrue(col) + default: + return p.Append(func(b *Builder) { + b.Ident(col) + b.WriteOp(OpNEQ) + p.arg(b, arg) + }) + } +} + +// ColumnsNEQ appends a "<>" predicate between 2 columns. +func ColumnsNEQ(col1, col2 string) *Predicate { + return P().ColumnsNEQ(col1, col2) +} + +// ColumnsNEQ appends a "<>" predicate between 2 columns. +func (p *Predicate) ColumnsNEQ(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpNEQ) +} + +// LT returns a "<" predicate. +func LT(col string, value any) *Predicate { + return P().LT(col, value) +} + +// LT appends a "<" predicate. +func (p *Predicate) LT(col string, arg any) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpLT) + p.arg(b, arg) + }) +} + +// ColumnsLT appends a "<" predicate between 2 columns. +func ColumnsLT(col1, col2 string) *Predicate { + return P().ColumnsLT(col1, col2) +} + +// ColumnsLT appends a "<" predicate between 2 columns. +func (p *Predicate) ColumnsLT(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpLT) +} + +// LTE returns a "<=" predicate. +func LTE(col string, value any) *Predicate { + return P().LTE(col, value) +} + +// LTE appends a "<=" predicate. +func (p *Predicate) LTE(col string, arg any) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpLTE) + p.arg(b, arg) + }) +} + +// ColumnsLTE appends a "<=" predicate between 2 columns. +func ColumnsLTE(col1, col2 string) *Predicate { + return P().ColumnsLTE(col1, col2) +} + +// ColumnsLTE appends a "<=" predicate between 2 columns. +func (p *Predicate) ColumnsLTE(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpLTE) +} + +// GT returns a ">" predicate. +func GT(col string, value any) *Predicate { + return P().GT(col, value) +} + +// GT appends a ">" predicate. 
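+//
+// For example, an illustrative sketch:
+//
+//	query, args := P().GT("age", 18).Query()
+//	// query: `age` > ?
+//	// args:  [18]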
+func (p *Predicate) GT(col string, arg any) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpGT) + p.arg(b, arg) + }) +} + +// ColumnsGT appends a ">" predicate between 2 columns. +func ColumnsGT(col1, col2 string) *Predicate { + return P().ColumnsGT(col1, col2) +} + +// ColumnsGT appends a ">" predicate between 2 columns. +func (p *Predicate) ColumnsGT(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpGT) +} + +// GTE returns a ">=" predicate. +func GTE(col string, value any) *Predicate { + return P().GTE(col, value) +} + +// GTE appends a ">=" predicate. +func (p *Predicate) GTE(col string, arg any) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col) + p.WriteOp(OpGTE) + p.arg(b, arg) + }) +} + +// ColumnsGTE appends a ">=" predicate between 2 columns. +func ColumnsGTE(col1, col2 string) *Predicate { + return P().ColumnsGTE(col1, col2) +} + +// ColumnsGTE appends a ">=" predicate between 2 columns. +func (p *Predicate) ColumnsGTE(col1, col2 string) *Predicate { + return p.ColumnsOp(col1, col2, OpGTE) +} + +// NotNull returns the `IS NOT NULL` predicate. +func NotNull(col string) *Predicate { + return P().NotNull(col) +} + +// NotNull appends the `IS NOT NULL` predicate. +func (p *Predicate) NotNull(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col).WriteString(" IS NOT NULL") + }) +} + +// IsNull returns the `IS NULL` predicate. +func IsNull(col string) *Predicate { + return P().IsNull(col) +} + +// IsNull appends the `IS NULL` predicate. +func (p *Predicate) IsNull(col string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col).WriteString(" IS NULL") + }) +} + +// In returns the `IN` predicate. +func In(col string, args ...any) *Predicate { + return P().In(col, args...) +} + +// In appends the `IN` predicate. +func (p *Predicate) In(col string, args ...any) *Predicate { + // If no arguments were provided, append the FALSE constant, since + // we cannot apply "IN ()". This will make this predicate falsy. + if len(args) == 0 { + return p.False() + } + return p.Append(func(b *Builder) { + b.Ident(col).WriteOp(OpIn) + b.Wrap(func(b *Builder) { + if s, ok := args[0].(*Selector); ok { + b.Join(s) + } else { + b.Args(args...) + } + }) + }) +} + +// InInts returns the `IN` predicate for ints. +func InInts(col string, args ...int) *Predicate { + return P().InInts(col, args...) +} + +// InValues adds the `IN` predicate for slice of driver.Value. +func InValues(col string, args ...driver.Value) *Predicate { + return P().InValues(col, args...) +} + +// InInts adds the `IN` predicate for ints. +func (p *Predicate) InInts(col string, args ...int) *Predicate { + iface := make([]any, len(args)) + for i := range args { + iface[i] = args[i] + } + return p.In(col, iface...) +} + +// InValues adds the `IN` predicate for slice of driver.Value. +func (p *Predicate) InValues(col string, args ...driver.Value) *Predicate { + iface := make([]any, len(args)) + for i := range args { + iface[i] = args[i] + } + return p.In(col, iface...) +} + +// NotIn returns the `Not IN` predicate. +func NotIn(col string, args ...any) *Predicate { + return P().NotIn(col, args...) +} + +// NotIn appends the `Not IN` predicate. +func (p *Predicate) NotIn(col string, args ...any) *Predicate { + // If no arguments were provided, append the NOT FALSE constant, since + // we cannot apply "NOT IN ()". This will make this predicate truthy. 
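+	// For example (illustrative): NotIn("id") with no arguments renders
+	// NOT (FALSE), which matches every row.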
+ if len(args) == 0 { + return Not(p.False()) + } + return p.Append(func(b *Builder) { + b.Ident(col).WriteOp(OpNotIn) + b.Wrap(func(b *Builder) { + if s, ok := args[0].(*Selector); ok { + b.Join(s) + } else { + b.Args(args...) + } + }) + }) +} + +// Exists returns the `Exists` predicate. +func Exists(query Querier) *Predicate { + return P().Exists(query) +} + +// Exists appends the `EXISTS` predicate with the given query. +func (p *Predicate) Exists(query Querier) *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("EXISTS ") + b.Wrap(func(b *Builder) { + b.Join(query) + }) + }) +} + +// NotExists returns the `NotExists` predicate. +func NotExists(query Querier) *Predicate { + return P().NotExists(query) +} + +// NotExists appends the `NOT EXISTS` predicate with the given query. +func (p *Predicate) NotExists(query Querier) *Predicate { + return p.Append(func(b *Builder) { + b.WriteString("NOT EXISTS ") + b.Wrap(func(b *Builder) { + b.Join(query) + }) + }) +} + +// Like returns the `LIKE` predicate. +func Like(col, pattern string) *Predicate { + return P().Like(col, pattern) +} + +// Like appends the `LIKE` predicate. +func (p *Predicate) Like(col, pattern string) *Predicate { + return p.Append(func(b *Builder) { + b.Ident(col).WriteOp(OpLike) + b.Arg(pattern) + }) +} + +// escape escapes w with the default escape character ('/'), +// to be used by the pattern matching functions below. +// The second return value indicates if w was escaped or not. +func escape(w string) (string, bool) { + var n int + for i := range w { + if c := w[i]; c == '%' || c == '_' || c == '\\' { + n++ + } + } + // No characters to escape. + if n == 0 { + return w, false + } + var b strings.Builder + b.Grow(len(w) + n) + for _, c := range w { + if c == '%' || c == '_' || c == '\\' { + b.WriteByte('\\') + } + b.WriteRune(c) + } + return b.String(), true +} + +func (p *Predicate) escapedLike(col, left, right, word string) *Predicate { + return p.Append(func(b *Builder) { + w, escaped := escape(word) + b.Ident(col).WriteOp(OpLike) + b.Arg(left + w + right) + if p.dialect == dialect.SQLite && escaped { + p.WriteString(" ESCAPE ").Arg("\\") + } + }) +} + +// HasPrefix is a helper predicate that checks prefix using the LIKE predicate. +func HasPrefix(col, prefix string) *Predicate { + return P().HasPrefix(col, prefix) +} + +// HasPrefix is a helper predicate that checks prefix using the LIKE predicate. +func (p *Predicate) HasPrefix(col, prefix string) *Predicate { + return p.escapedLike(col, "", "%", prefix) +} + +// HasSuffix is a helper predicate that checks suffix using the LIKE predicate. +func HasSuffix(col, suffix string) *Predicate { return P().HasSuffix(col, suffix) } + +// HasSuffix is a helper predicate that checks suffix using the LIKE predicate. +func (p *Predicate) HasSuffix(col, suffix string) *Predicate { + return p.escapedLike(col, "%", "", suffix) +} + +// EqualFold is a helper predicate that applies the "=" predicate with case-folding. +func EqualFold(col, sub string) *Predicate { return P().EqualFold(col, sub) } + +// EqualFold is a helper predicate that applies the "=" predicate with case-folding. +func (p *Predicate) EqualFold(col, sub string) *Predicate { + return p.Append(func(b *Builder) { + f := &Func{} + f.SetDialect(b.dialect) + switch b.dialect { + case dialect.MySQL: + // We assume the CHARACTER SET is configured to utf8mb4, + // because this how it is defined in dialect/sql/schema. 
+ b.Ident(col).WriteString(" COLLATE utf8mb4_general_ci = ") + b.Arg(strings.ToLower(sub)) + case dialect.Postgres: + b.Ident(col).WriteString(" ILIKE ") + w, _ := escape(sub) + b.Arg(strings.ToLower(w)) + default: // SQLite. + f.Lower(col) + b.WriteString(f.String()) + b.WriteOp(OpEQ) + b.Arg(strings.ToLower(sub)) + } + }) +} + +// Contains is a helper predicate that checks substring using the LIKE predicate. +func Contains(col, sub string) *Predicate { return P().Contains(col, sub) } + +// Contains is a helper predicate that checks substring using the LIKE predicate. +func (p *Predicate) Contains(col, substr string) *Predicate { + return p.escapedLike(col, "%", "%", substr) +} + +// ContainsFold is a helper predicate that checks substring using the LIKE predicate with case-folding. +func ContainsFold(col, sub string) *Predicate { return P().ContainsFold(col, sub) } + +// ContainsFold is a helper predicate that applies the LIKE predicate with case-folding. +func (p *Predicate) ContainsFold(col, substr string) *Predicate { + return p.Append(func(b *Builder) { + w, escaped := escape(substr) + switch b.dialect { + case dialect.MySQL: + // We assume the CHARACTER SET is configured to utf8mb4, + // because this how it is defined in dialect/sql/schema. + b.Ident(col).WriteString(" COLLATE utf8mb4_general_ci LIKE ") + b.Arg("%" + strings.ToLower(w) + "%") + case dialect.Postgres: + b.Ident(col).WriteString(" ILIKE ") + b.Arg("%" + strings.ToLower(w) + "%") + default: // SQLite. + var f Func + f.SetDialect(b.dialect) + f.Lower(col) + b.WriteString(f.String()).WriteString(" LIKE ") + b.Arg("%" + strings.ToLower(w) + "%") + if escaped { + p.WriteString(" ESCAPE ").Arg("\\") + } + } + }) +} + +// CompositeGT returns a composite ">" predicate +func CompositeGT(columns []string, args ...any) *Predicate { + return P().CompositeGT(columns, args...) +} + +// CompositeLT returns a composite "<" predicate +func CompositeLT(columns []string, args ...any) *Predicate { + return P().CompositeLT(columns, args...) +} + +func (p *Predicate) compositeP(operator string, columns []string, args ...any) *Predicate { + return p.Append(func(b *Builder) { + b.Wrap(func(nb *Builder) { + nb.IdentComma(columns...) + }) + b.WriteString(operator) + b.WriteString("(") + b.Args(args...) + b.WriteString(")") + }) +} + +// CompositeGT returns a composite ">" predicate. +func (p *Predicate) CompositeGT(columns []string, args ...any) *Predicate { + const operator = " > " + return p.compositeP(operator, columns, args...) +} + +// CompositeLT appends a composite "<" predicate. +func (p *Predicate) CompositeLT(columns []string, args ...any) *Predicate { + const operator = " < " + return p.compositeP(operator, columns, args...) +} + +// Append appends a new function to the predicate callbacks. +// The callback list are executed on call to Query. +func (p *Predicate) Append(f func(*Builder)) *Predicate { + p.fns = append(p.fns, f) + return p +} + +// Query returns query representation of a predicate. +func (p *Predicate) Query() (string, []any) { + if p.Len() > 0 || len(p.args) > 0 { + p.Reset() + p.args = nil + } + for _, f := range p.fns { + f(&p.Builder) + } + return p.String(), p.args +} + +// arg calls Builder.Arg, but wraps `a` with parens in case of a Selector. +func (*Predicate) arg(b *Builder, a any) { + switch a.(type) { + case *Selector: + b.Wrap(func(b *Builder) { + b.Arg(a) + }) + default: + b.Arg(a) + } +} + +// clone returns a shallow clone of p. 
+func (p *Predicate) clone() *Predicate {
+	if p == nil {
+		return p
+	}
+	return &Predicate{fns: append([]func(*Builder){}, p.fns...)}
+}
+
+func (p *Predicate) mayWrap(preds []*Predicate, b *Builder, op string) {
+	switch n := len(preds); {
+	case n == 1:
+		b.Join(preds[0])
+		return
+	case n > 1 && p.depth != 0:
+		b.WriteByte('(')
+		defer b.WriteByte(')')
+	}
+	for i := range preds {
+		preds[i].depth = p.depth + 1
+		if i > 0 {
+			b.WriteByte(' ')
+			b.WriteString(op)
+			b.WriteByte(' ')
+		}
+		if len(preds[i].fns) > 1 {
+			b.Wrap(func(b *Builder) {
+				b.Join(preds[i])
+			})
+		} else {
+			b.Join(preds[i])
+		}
+	}
+}
+
+// Func represents an SQL function.
+type Func struct {
+	Builder
+	fns []func(*Builder)
+}
+
+// Lower wraps the given column with the LOWER function.
+//
+//	P().EQ(sql.Lower("name"), "a8m")
+func Lower(ident string) string {
+	f := &Func{}
+	f.Lower(ident)
+	return f.String()
+}
+
+// Lower wraps the given ident with the LOWER function.
+func (f *Func) Lower(ident string) {
+	f.byName("LOWER", ident)
+}
+
+// Count wraps the ident with the COUNT aggregation function.
+func Count(ident string) string {
+	f := &Func{}
+	f.Count(ident)
+	return f.String()
+}
+
+// Count wraps the ident with the COUNT aggregation function.
+func (f *Func) Count(ident string) {
+	f.byName("COUNT", ident)
+}
+
+// Max wraps the ident with the MAX aggregation function.
+func Max(ident string) string {
+	f := &Func{}
+	f.Max(ident)
+	return f.String()
+}
+
+// Max wraps the ident with the MAX aggregation function.
+func (f *Func) Max(ident string) {
+	f.byName("MAX", ident)
+}
+
+// Min wraps the ident with the MIN aggregation function.
+func Min(ident string) string {
+	f := &Func{}
+	f.Min(ident)
+	return f.String()
+}
+
+// Min wraps the ident with the MIN aggregation function.
+func (f *Func) Min(ident string) {
+	f.byName("MIN", ident)
+}
+
+// Sum wraps the ident with the SUM aggregation function.
+func Sum(ident string) string {
+	f := &Func{}
+	f.Sum(ident)
+	return f.String()
+}
+
+// Sum wraps the ident with the SUM aggregation function.
+func (f *Func) Sum(ident string) {
+	f.byName("SUM", ident)
+}
+
+// Avg wraps the ident with the AVG aggregation function.
+func Avg(ident string) string {
+	f := &Func{}
+	f.Avg(ident)
+	return f.String()
+}
+
+// Avg wraps the ident with the AVG aggregation function.
+func (f *Func) Avg(ident string) {
+	f.byName("AVG", ident)
+}
+
+// byName wraps an identifier with a function name.
+func (f *Func) byName(fn, ident string) {
+	f.Append(func(b *Builder) {
+		f.WriteString(fn)
+		f.Wrap(func(b *Builder) {
+			b.Ident(ident)
+		})
+	})
+}
+
+// Append appends a new function to the function callbacks.
+// The callback list is executed on call to String.
+func (f *Func) Append(fn func(*Builder)) *Func {
+	f.fns = append(f.fns, fn)
+	return f
+}
+
+// String implements the fmt.Stringer.
+func (f *Func) String() string {
+	for _, fn := range f.fns {
+		fn(&f.Builder)
+	}
+	return f.Builder.String()
+}
+
+// As suffixes the given column with an alias (`a` AS `b`).
+func As(ident string, as string) string {
+	b := &Builder{}
+	b.fromIdent(ident)
+	b.Ident(ident).Pad().WriteString("AS")
+	b.Pad().Ident(as)
+	return b.String()
+}
+
+// Distinct prefixes the given columns with the `DISTINCT` keyword (DISTINCT `id`).
+func Distinct(idents ...string) string {
+	b := &Builder{}
+	if len(idents) > 0 {
+		b.fromIdent(idents[0])
+	}
+	b.WriteString("DISTINCT")
+	b.Pad().IdentComma(idents...)
+	return b.String()
+}
+
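The Func helpers and the As/Distinct formatters above return plain strings that can be fed back into a Select. A small sketch with the default (backtick) quoting; the identifiers are illustrative:

package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	fmt.Println(sql.Lower("name"))                 // LOWER(`name`)
	fmt.Println(sql.Count("*"))                    // COUNT(*)
	fmt.Println(sql.As(sql.Max("age"), "max_age")) // MAX(`age`) AS `max_age`
	fmt.Println(sql.Distinct("id", "name"))        // DISTINCT `id`, `name`
}
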
+// TableView is a view that returns a table view.
+// Can be a Table, Selector or a View (WITH statement).
+type TableView interface {
+	view()
+	// C returns a formatted string prefixed
+	// with the table view qualifier.
+	C(string) string
+}
+
+// queryView allows using Querier (expressions) in the FROM clause.
+type queryView struct{ Querier }
+
+func (*queryView) view() {}
+
+func (q *queryView) C(column string) string {
+	if tv, ok := q.Querier.(TableView); ok {
+		return tv.C(column)
+	}
+	return column
+}
+
+// SelectTable is a table selector.
+type SelectTable struct {
+	Builder
+	as     string
+	name   string
+	schema string
+	quote  bool
+}
+
+// Table returns a new table selector.
+//
+//	t1 := Table("users").As("u")
+//	return Select(t1.C("name"))
+func Table(name string) *SelectTable {
+	return &SelectTable{quote: true, name: name}
+}
+
+// Schema sets the schema name of the table.
+func (s *SelectTable) Schema(name string) *SelectTable {
+	s.schema = name
+	return s
+}
+
+// As adds the AS clause to the table selector.
+func (s *SelectTable) As(alias string) *SelectTable {
+	s.as = alias
+	return s
+}
+
+// C returns a formatted string for the table column.
+func (s *SelectTable) C(column string) string {
+	name := s.name
+	if s.as != "" {
+		name = s.as
+	}
+	b := &Builder{dialect: s.dialect}
+	if s.as == "" {
+		b.writeSchema(s.schema)
+	}
+	b.Ident(name).WriteByte('.').Ident(column)
+	return b.String()
+}
+
+// Columns returns a list of formatted strings for the table columns.
+func (s *SelectTable) Columns(columns ...string) []string {
+	names := make([]string, 0, len(columns))
+	for _, c := range columns {
+		names = append(names, s.C(c))
+	}
+	return names
+}
+
+// Unquote makes the table name be formatted as a raw string (unquoted).
+// It is useful when you don't want to query tables under the current database.
+// For example: "INFORMATION_SCHEMA.TABLE_CONSTRAINTS" in MySQL.
+func (s *SelectTable) Unquote() *SelectTable {
+	s.quote = false
+	return s
+}
+
+// ref returns the table reference.
+func (s *SelectTable) ref() string {
+	if !s.quote {
+		return s.name
+	}
+	b := &Builder{dialect: s.dialect}
+	b.writeSchema(s.schema)
+	b.Ident(s.name)
+	if s.as != "" {
+		b.WriteString(" AS ")
+		b.Ident(s.as)
+	}
+	return b.String()
+}
+
+// implement the table view.
+func (*SelectTable) view() {}
+
+// join table option.
+type join struct {
+	on    *Predicate
+	kind  string
+	table TableView
+}
+
+// clone a joiner.
+func (j join) clone() join {
+	if sel, ok := j.table.(*Selector); ok {
+		j.table = sel.Clone()
+	}
+	j.on = j.on.clone()
+	return j
+}
+
+// Selector is a builder for the `SELECT` statement.
+type Selector struct {
+	Builder
+	// ctx stores contextual data typically from
+	// generated code such as alternate table schemas.
+	ctx       context.Context
+	as        string
+	selection []selection
+	from      []TableView
+	joins     []join
+	where     *Predicate
+	or        bool
+	not       bool
+	order     []any
+	group     []string
+	having    *Predicate
+	limit     *int
+	offset    *int
+	distinct  bool
+	setOps    []setOp
+	prefix    Queries
+	lock      *LockOptions
+}
+
+// WithContext sets the context into the *Selector.
+func (s *Selector) WithContext(ctx context.Context) *Selector {
+	if ctx == nil {
+		panic("nil context")
+	}
+	s.ctx = ctx
+	return s
+}
+
+// Context returns the Selector context or Background
+// if nil.
+func (s *Selector) Context() context.Context {
+	if s.ctx != nil {
+		return s.ctx
+	}
+	return context.Background()
+}
+
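A short sketch of SelectTable in use: an alias qualifies columns via C, and Unquote allows addressing system tables. The names are illustrative:

package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	t := sql.Table("users").As("u")
	fmt.Println(t.C("name"))             // `u`.`name`
	fmt.Println(t.Columns("id", "name")) // [`u`.`id` `u`.`name`]

	// Unquote keeps the reference raw, e.g. for INFORMATION_SCHEMA tables.
	is := sql.Table("INFORMATION_SCHEMA.TABLE_CONSTRAINTS").Unquote()
	q, _ := sql.Select("*").From(is).Query()
	fmt.Println(q) // SELECT * FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
}

+// Select returns a new selector for the `SELECT` statement.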
+// +// t1 := Table("users").As("u") +// t2 := Select().From(Table("groups")).Where(EQ("user_id", 10)).As("g") +// return Select(t1.C("id"), t2.C("name")). +// From(t1). +// Join(t2). +// On(t1.C("id"), t2.C("user_id")) +func Select(columns ...string) *Selector { + return (&Selector{}).Select(columns...) +} + +// SelectExpr is like Select, but supports passing arbitrary +// expressions for SELECT clause. +func SelectExpr(exprs ...Querier) *Selector { + return (&Selector{}).SelectExpr(exprs...) +} + +// selection represents a column or an expression selection. +type selection struct { + x Querier + c string + as string +} + +// Select changes the columns selection of the SELECT statement. +// Empty selection means all columns *. +func (s *Selector) Select(columns ...string) *Selector { + s.selection = make([]selection, len(columns)) + for i := range columns { + s.selection[i] = selection{c: columns[i]} + } + return s +} + +// AppendSelect appends additional columns to the SELECT statement. +func (s *Selector) AppendSelect(columns ...string) *Selector { + for i := range columns { + s.selection = append(s.selection, selection{c: columns[i]}) + } + return s +} + +// AppendSelectAs appends additional column to the SELECT statement with the given alias. +func (s *Selector) AppendSelectAs(column, as string) *Selector { + s.selection = append(s.selection, selection{c: column, as: as}) + return s +} + +// SelectExpr changes the columns selection of the SELECT statement +// with custom list of expressions. +func (s *Selector) SelectExpr(exprs ...Querier) *Selector { + s.selection = make([]selection, len(exprs)) + for i := range exprs { + s.selection[i] = selection{x: exprs[i]} + } + return s +} + +// AppendSelectExpr appends additional expressions to the SELECT statement. +func (s *Selector) AppendSelectExpr(exprs ...Querier) *Selector { + for i := range exprs { + s.selection = append(s.selection, selection{x: exprs[i]}) + } + return s +} + +// AppendSelectExprAs appends additional expressions to the SELECT statement with the given name. +func (s *Selector) AppendSelectExprAs(expr Querier, as string) *Selector { + x := expr + if _, ok := expr.(*raw); !ok { + x = ExprFunc(func(b *Builder) { + b.S("(").Join(expr).S(")") + }) + } + s.selection = append(s.selection, selection{ + x: x, + as: as, + }) + return s +} + +// FindSelection returns all occurrences in the selection that match the given column name. +// For example, for column "a" the following match: a, "a", "t"."a", "t"."b" AS "a". +func (s *Selector) FindSelection(name string) (matches []string) { + matchC := func(qualified string) bool { + switch ident, pg := s.isIdent(qualified), s.postgres(); { + case !ident: + if i := strings.IndexRune(qualified, '.'); i > 0 { + return qualified[i+1:] == name + } + case ident && pg: + if i := strings.Index(qualified, `"."`); i > 0 { + return s.unquote(qualified[i+2:]) == name + } + case ident: + if i := strings.Index(qualified, "`.`"); i > 0 { + return s.unquote(qualified[i+2:]) == name + } + } + return false + } + for _, c := range s.selection { + switch { + // Match aliases. + case c.as != "": + if ident := s.isIdent(c.as); !ident && c.as == name || ident && s.unquote(c.as) == name { + matches = append(matches, c.as) + } + // Match qualified columns. + case c.c != "" && s.isQualified(c.c) && matchC(c.c): + matches = append(matches, c.c) + // Match unqualified columns. 
+ case c.c != "" && (c.c == name || s.isIdent(c.c) && s.unquote(c.c) == name): + matches = append(matches, c.c) + } + } + return matches +} + +// SelectedColumns returns the selected columns in the Selector. +func (s *Selector) SelectedColumns() []string { + columns := make([]string, 0, len(s.selection)) + for i := range s.selection { + if c := s.selection[i].c; c != "" { + columns = append(columns, c) + } + } + return columns +} + +// UnqualifiedColumns returns an unqualified version of the +// selected columns in the Selector. e.g. "t1"."c" => "c". +func (s *Selector) UnqualifiedColumns() []string { + columns := make([]string, 0, len(s.selection)) + for i := range s.selection { + c := s.selection[i].c + if c == "" { + continue + } + if s.isIdent(c) { + parts := strings.FieldsFunc(c, func(r rune) bool { + return r == '`' || r == '"' + }) + if n := len(parts); n > 0 && parts[n-1] != "" { + c = parts[n-1] + } + } + columns = append(columns, c) + } + return columns +} + +// From sets the source of `FROM` clause. +func (s *Selector) From(t TableView) *Selector { + s.from = nil + return s.AppendFrom(t) +} + +// AppendFrom appends a new TableView to the `FROM` clause. +func (s *Selector) AppendFrom(t TableView) *Selector { + s.from = append(s.from, t) + if st, ok := t.(state); ok { + st.SetDialect(s.dialect) + } + return s +} + +// FromExpr sets the expression of `FROM` clause. +func (s *Selector) FromExpr(x Querier) *Selector { + s.from = nil + return s.AppendFromExpr(x) +} + +// AppendFromExpr appends an expression (Queries) to the `FROM` clause. +func (s *Selector) AppendFromExpr(x Querier) *Selector { + s.from = append(s.from, &queryView{Querier: x}) + if st, ok := x.(state); ok { + st.SetDialect(s.dialect) + } + return s +} + +// Distinct adds the DISTINCT keyword to the `SELECT` statement. +func (s *Selector) Distinct() *Selector { + s.distinct = true + return s +} + +// SetDistinct sets explicitly if the returned rows are distinct or indistinct. +func (s *Selector) SetDistinct(v bool) *Selector { + s.distinct = v + return s +} + +// Limit adds the `LIMIT` clause to the `SELECT` statement. +func (s *Selector) Limit(limit int) *Selector { + s.limit = &limit + return s +} + +// Offset adds the `OFFSET` clause to the `SELECT` statement. +func (s *Selector) Offset(offset int) *Selector { + s.offset = &offset + return s +} + +// Where sets or appends the given predicate to the statement. +func (s *Selector) Where(p *Predicate) *Selector { + if s.not { + p = Not(p) + s.not = false + } + switch { + case s.where == nil: + s.where = p + case s.where != nil && s.or: + s.where = Or(s.where, p) + s.or = false + default: + s.where = And(s.where, p) + } + return s +} + +// P returns the predicate of a selector. +func (s *Selector) P() *Predicate { + return s.where +} + +// SetP sets explicitly the predicate function for the selector and clear its previous state. +func (s *Selector) SetP(p *Predicate) *Selector { + s.where = p + s.or = false + s.not = false + return s +} + +// FromSelect copies the predicate from a selector. +func (s *Selector) FromSelect(s2 *Selector) *Selector { + s.where = s2.where + return s +} + +// Not sets the next coming predicate with not. +func (s *Selector) Not() *Selector { + s.not = true + return s +} + +// Or sets the next coming predicate with OR operator (disjunction). +func (s *Selector) Or() *Selector { + s.or = true + return s +} + +// Table returns the selected table. 
+func (s *Selector) Table() *SelectTable { + if len(s.from) == 0 { + return nil + } + return selectTable(s.from[0]) +} + +// selectTable returns a *SelectTable from the given TableView. +func selectTable(t TableView) *SelectTable { + if t == nil { + return nil + } + switch view := t.(type) { + case *SelectTable: + return view + case *Selector: + if len(view.from) == 0 { + return nil + } + return selectTable(view.from[0]) + case *queryView, *WithBuilder: + return nil + default: + panic(fmt.Sprintf("unexpected TableView %T", t)) + } +} + +// TableName returns the name of the selected table or alias of selector. +func (s *Selector) TableName() string { + switch view := s.from[0].(type) { + case *SelectTable: + return view.name + case *Selector: + return view.as + default: + panic(fmt.Sprintf("unhandled TableView type %T", s.from)) + } +} + +// HasJoins reports if the selector has any JOINs. +func (s *Selector) HasJoins() bool { + return len(s.joins) > 0 +} + +// JoinedTable returns the first joined table with the given name. +func (s *Selector) JoinedTable(name string) (*SelectTable, bool) { + for _, j := range s.joins { + if t := selectTable(j.table); t != nil && t.name == name { + return t, true + } + } + return nil, false +} + +// JoinedTableView returns the first joined TableView with the given name or alias. +func (s *Selector) JoinedTableView(name string) (TableView, bool) { + for _, j := range s.joins { + switch t := j.table.(type) { + case *SelectTable: + if t.name == name || t.as == name { + return t, true + } + case *Selector: + if t.as == name { + return t, true + } + for _, t2 := range t.from { + if t3 := selectTable(t2); t3 != nil && (t3.name == name || t3.as == name) { + return t3, true + } + } + } + } + return nil, false +} + +// Join appends a `JOIN` clause to the statement. +func (s *Selector) Join(t TableView) *Selector { + return s.join("JOIN", t) +} + +// LeftJoin appends a `LEFT JOIN` clause to the statement. +func (s *Selector) LeftJoin(t TableView) *Selector { + return s.join("LEFT JOIN", t) +} + +// RightJoin appends a `RIGHT JOIN` clause to the statement. +func (s *Selector) RightJoin(t TableView) *Selector { + return s.join("RIGHT JOIN", t) +} + +// FullJoin appends a `FULL JOIN` clause to the statement. +func (s *Selector) FullJoin(t TableView) *Selector { + return s.join("FULL JOIN", t) +} + +// join adds a join table to the selector with the given kind. +func (s *Selector) join(kind string, t TableView) *Selector { + s.joins = append(s.joins, join{ + kind: kind, + table: t, + }) + switch view := t.(type) { + case *SelectTable: + if view.as == "" { + view.as = "t" + strconv.Itoa(len(s.joins)) + } + case *Selector: + if view.as == "" { + view.as = "t" + strconv.Itoa(len(s.joins)) + } + } + if st, ok := t.(state); ok { + st.SetDialect(s.dialect) + } + return s +} + +type ( + // setOp represents a set/compound operation. + setOp struct { + Type setOpType // Set operation type. + All bool // Quantifier was set to ALL (defaults to DISTINCT). + TableView // Query or table to operate on. + } + // setOpType is a set operation type. + setOpType string +) + +const ( + setOpTypeUnion setOpType = "UNION" + setOpTypeExcept setOpType = "EXCEPT" + setOpTypeIntersect setOpType = "INTERSECT" +) + +// Union appends the UNION (DISTINCT) clause to the query. +func (s *Selector) Union(t TableView) *Selector { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeUnion, + TableView: t, + }) + return s +} + +// UnionAll appends the UNION ALL clause to the query. 
+func (s *Selector) UnionAll(t TableView) *Selector { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeUnion, + All: true, + TableView: t, + }) + return s +} + +// UnionDistinct appends the UNION DISTINCT clause to the query. +// Deprecated: use Union instead as by default, duplicate rows +// are eliminated unless ALL is specified. +func (s *Selector) UnionDistinct(t TableView) *Selector { + return s.Union(t) +} + +// Except appends the EXCEPT clause to the query. +func (s *Selector) Except(t TableView) *Selector { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeExcept, + TableView: t, + }) + return s +} + +// ExceptAll appends the EXCEPT ALL clause to the query. +func (s *Selector) ExceptAll(t TableView) *Selector { + if s.sqlite() { + s.AddError(errors.New("EXCEPT ALL is not supported by SQLite")) + } else { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeExcept, + All: true, + TableView: t, + }) + } + return s +} + +// Intersect appends the INTERSECT clause to the query. +func (s *Selector) Intersect(t TableView) *Selector { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeIntersect, + TableView: t, + }) + return s +} + +// IntersectAll appends the INTERSECT ALL clause to the query. +func (s *Selector) IntersectAll(t TableView) *Selector { + if s.sqlite() { + s.AddError(errors.New("INTERSECT ALL is not supported by SQLite")) + } else { + s.setOps = append(s.setOps, setOp{ + Type: setOpTypeIntersect, + All: true, + TableView: t, + }) + } + return s +} + +// Prefix prefixes the query with list of queries. +func (s *Selector) Prefix(queries ...Querier) *Selector { + s.prefix = append(s.prefix, queries...) + return s +} + +// C returns a formatted string for a selected column from this statement. +func (s *Selector) C(column string) string { + // Skip formatting qualified columns. + if s.isQualified(column) { + return column + } + if s.as != "" { + b := &Builder{dialect: s.dialect} + b.Ident(s.as) + b.WriteByte('.') + b.Ident(column) + return b.String() + } + return s.Table().C(column) +} + +// Columns returns a list of formatted strings for a selected columns from this statement. +func (s *Selector) Columns(columns ...string) []string { + names := make([]string, 0, len(columns)) + for _, c := range columns { + names = append(names, s.C(c)) + } + return names +} + +// OnP sets or appends the given predicate for the `ON` clause of the statement. +func (s *Selector) OnP(p *Predicate) *Selector { + if len(s.joins) > 0 { + join := &s.joins[len(s.joins)-1] + switch { + case join.on == nil: + join.on = p + default: + join.on = And(join.on, p) + } + } + return s +} + +// On sets the `ON` clause for the `JOIN` operation. +func (s *Selector) On(c1, c2 string) *Selector { + s.OnP(P(func(builder *Builder) { + builder.Ident(c1).WriteOp(OpEQ).Ident(c2) + })) + return s +} + +// As give this selection an alias. +func (s *Selector) As(alias string) *Selector { + s.as = alias + return s +} + +// Count sets the Select statement to be a `SELECT COUNT(*)`. +func (s *Selector) Count(columns ...string) *Selector { + column := "*" + if len(columns) > 0 { + b := &Builder{} + b.IdentComma(columns...) + column = b.String() + } + s.Select(Count(column)) + return s +} + +// LockAction tells the transaction what to do in case of +// requesting a row that is locked by other transaction. +type LockAction string + +const ( + // NoWait means never wait and returns an error. + NoWait LockAction = "NOWAIT" + // SkipLocked means never wait and skip. 
+	SkipLocked LockAction = "SKIP LOCKED"
+)
+
+// LockStrength defines the strength of the lock (see the list below).
+type LockStrength string
+
+// A list of all locking clauses.
+const (
+	LockShare       LockStrength = "SHARE"
+	LockUpdate      LockStrength = "UPDATE"
+	LockNoKeyUpdate LockStrength = "NO KEY UPDATE"
+	LockKeyShare    LockStrength = "KEY SHARE"
+)
+
+type (
+	// LockOptions defines a SELECT statement
+	// lock for protecting concurrent updates.
+	LockOptions struct {
+		// Strength of the lock.
+		Strength LockStrength
+		// Action of the lock.
+		Action LockAction
+		// Tables are the optional tables to lock.
+		Tables []string
+		// custom clause for locking.
+		clause string
+	}
+	// LockOption allows configuring the LockOptions using functional options.
+	LockOption func(*LockOptions)
+)
+
+// WithLockAction sets the Action of the lock.
+func WithLockAction(action LockAction) LockOption {
+	return func(c *LockOptions) {
+		c.Action = action
+	}
+}
+
+// WithLockTables sets the Tables of the lock.
+func WithLockTables(tables ...string) LockOption {
+	return func(c *LockOptions) {
+		c.Tables = tables
+	}
+}
+
+// WithLockClause allows providing a custom clause for
+// locking the statement. For example, in MySQL <= 8.22:
+//
+//	Select().
+//		From(Table("users")).
+//		ForShare(
+//			WithLockClause("LOCK IN SHARE MODE"),
+//		)
+func WithLockClause(clause string) LockOption {
+	return func(c *LockOptions) {
+		c.clause = clause
+	}
+}
+
+// For sets the lock configuration for suffixing the `SELECT`
+// statement with the `FOR [SHARE | UPDATE] ...` clause.
+func (s *Selector) For(l LockStrength, opts ...LockOption) *Selector {
+	if s.Dialect() == dialect.SQLite {
+		s.AddError(errors.New("sql: SELECT .. FOR UPDATE/SHARE not supported in SQLite"))
+	}
+	s.lock = &LockOptions{Strength: l}
+	for _, opt := range opts {
+		opt(s.lock)
+	}
+	return s
+}
+
+// ForShare sets the lock configuration for suffixing the
+// `SELECT` statement with the `FOR SHARE` clause.
+func (s *Selector) ForShare(opts ...LockOption) *Selector {
+	return s.For(LockShare, opts...)
+}
+
+// ForUpdate sets the lock configuration for suffixing the
+// `SELECT` statement with the `FOR UPDATE` clause.
+func (s *Selector) ForUpdate(opts ...LockOption) *Selector {
+	return s.For(LockUpdate, opts...)
+}
+
+// Clone returns a duplicate of the selector, including all associated steps. It can be
+// used to prepare common SELECT statements and use them differently after the clone is made.
+func (s *Selector) Clone() *Selector {
+	if s == nil {
+		return nil
+	}
+	joins := make([]join, len(s.joins))
+	for i := range s.joins {
+		joins[i] = s.joins[i].clone()
+	}
+	return &Selector{
+		Builder:   s.Builder.clone(),
+		ctx:       s.ctx,
+		as:        s.as,
+		or:        s.or,
+		not:       s.not,
+		from:      s.from,
+		limit:     s.limit,
+		offset:    s.offset,
+		distinct:  s.distinct,
+		where:     s.where.clone(),
+		having:    s.having.clone(),
+		joins:     append([]join{}, joins...),
+		group:     append([]string{}, s.group...),
+		order:     append([]any{}, s.order...),
+		selection: append([]selection{}, s.selection...),
+	}
+}
+
+// Asc adds the ASC suffix for the given column.
+func Asc(column string) string {
+	b := &Builder{}
+	b.Ident(column).WriteString(" ASC")
+	return b.String()
+}
+
+// Desc adds the DESC suffix for the given column.
+func Desc(column string) string {
+	b := &Builder{}
+	b.Ident(column).WriteString(" DESC")
+	return b.String()
+}
+
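A sketch of the locking options above on the Postgres dialect (SQLite would record an error via AddError). The table and values are illustrative:

package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	q, args := sql.Dialect(dialect.Postgres).
		Select("id", "name").
		From(sql.Table("users")).
		Where(sql.EQ("name", "a8m")).
		ForUpdate(sql.WithLockAction(sql.NoWait)).
		Query()
	fmt.Println(q)    // SELECT "id", "name" FROM "users" WHERE "name" = $1 FOR UPDATE NOWAIT
	fmt.Println(args) // [a8m]
}

+// DescExpr returns a new expression where the DESC suffix is added.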
+func DescExpr(x Querier) Querier { + return ExprFunc(func(b *Builder) { + b.Join(x) + b.WriteString(" DESC") + }) +} + +// OrderBy appends the `ORDER BY` clause to the `SELECT` statement. +func (s *Selector) OrderBy(columns ...string) *Selector { + for i := range columns { + s.order = append(s.order, columns[i]) + } + return s +} + +// OrderColumns returns the ordered columns in the Selector. +// Note, this function skips columns selected with expressions. +func (s *Selector) OrderColumns() []string { + columns := make([]string, 0, len(s.order)) + for i := range s.order { + if c, ok := s.order[i].(string); ok { + columns = append(columns, c) + } + } + return columns +} + +// OrderExpr appends the `ORDER BY` clause to the `SELECT` +// statement with custom list of expressions. +func (s *Selector) OrderExpr(exprs ...Querier) *Selector { + for i := range exprs { + s.order = append(s.order, exprs[i]) + } + return s +} + +// OrderExprFunc appends the `ORDER BY` expression that evaluates +// the given function. +func (s *Selector) OrderExprFunc(f func(*Builder)) *Selector { + return s.OrderExpr( + Dialect(s.Dialect()).Expr(f), + ) +} + +// ClearOrder clears the ORDER BY clause to be empty. +func (s *Selector) ClearOrder() *Selector { + s.order = nil + return s +} + +// GroupBy appends the `GROUP BY` clause to the `SELECT` statement. +func (s *Selector) GroupBy(columns ...string) *Selector { + s.group = append(s.group, columns...) + return s +} + +// Having appends a predicate for the `HAVING` clause. +func (s *Selector) Having(p *Predicate) *Selector { + s.having = p + return s +} + +// Query returns query representation of a `SELECT` statement. +func (s *Selector) Query() (string, []any) { + b := s.Builder.clone() + s.joinPrefix(&b) + b.WriteString("SELECT ") + if s.distinct { + b.WriteString("DISTINCT ") + } + if len(s.selection) > 0 { + s.joinSelect(&b) + } else { + b.WriteString("*") + } + if len(s.from) > 0 { + b.WriteString(" FROM ") + } + for i, from := range s.from { + if i > 0 { + b.Comma() + } + switch t := from.(type) { + case *SelectTable: + t.SetDialect(s.dialect) + b.WriteString(t.ref()) + case *Selector: + t.SetDialect(s.dialect) + b.Wrap(func(b *Builder) { + b.Join(t) + }) + if t.as != "" { + b.WriteString(" AS ") + b.Ident(t.as) + } + case *WithBuilder: + t.SetDialect(s.dialect) + b.Ident(t.Name()) + case *queryView: + b.Join(t.Querier) + } + } + for _, join := range s.joins { + b.WriteString(" " + join.kind + " ") + switch view := join.table.(type) { + case *SelectTable: + view.SetDialect(s.dialect) + b.WriteString(view.ref()) + case *Selector: + view.SetDialect(s.dialect) + b.Wrap(func(b *Builder) { + b.Join(view) + }) + b.WriteString(" AS ") + b.Ident(view.as) + case *WithBuilder: + view.SetDialect(s.dialect) + b.Ident(view.Name()) + } + if join.on != nil { + b.WriteString(" ON ") + b.Join(join.on) + } + } + if s.where != nil { + b.WriteString(" WHERE ") + b.Join(s.where) + } + if len(s.group) > 0 { + b.WriteString(" GROUP BY ") + b.IdentComma(s.group...) 
+ } + if s.having != nil { + b.WriteString(" HAVING ") + b.Join(s.having) + } + if len(s.setOps) > 0 { + s.joinSetOps(&b) + } + joinOrder(s.order, &b) + if s.limit != nil { + b.WriteString(" LIMIT ") + b.WriteString(strconv.Itoa(*s.limit)) + } + if s.offset != nil { + b.WriteString(" OFFSET ") + b.WriteString(strconv.Itoa(*s.offset)) + } + s.joinLock(&b) + s.total = b.total + s.AddError(b.Err()) + return b.String(), b.args +} + +func (s *Selector) joinPrefix(b *Builder) { + if len(s.prefix) > 0 { + b.join(s.prefix, " ") + b.Pad() + } +} + +func (s *Selector) joinLock(b *Builder) { + if s.lock == nil { + return + } + b.Pad() + if s.lock.clause != "" { + b.WriteString(s.lock.clause) + return + } + b.WriteString("FOR ").WriteString(string(s.lock.Strength)) + if len(s.lock.Tables) > 0 { + b.WriteString(" OF ").IdentComma(s.lock.Tables...) + } + if s.lock.Action != "" { + b.Pad().WriteString(string(s.lock.Action)) + } +} + +func (s *Selector) joinSetOps(b *Builder) { + for _, op := range s.setOps { + b.WriteString(" " + string(op.Type) + " ") + if op.All { + b.WriteString("ALL ") + } + switch view := op.TableView.(type) { + case *SelectTable: + view.SetDialect(s.dialect) + b.WriteString(view.ref()) + case *Selector: + view.SetDialect(s.dialect) + b.Join(view) + if view.as != "" { + b.WriteString(" AS ") + b.Ident(view.as) + } + } + } +} + +func joinOrder(order []any, b *Builder) { + if len(order) == 0 { + return + } + b.WriteString(" ORDER BY ") + for i := range order { + if i > 0 { + b.Comma() + } + switch r := order[i].(type) { + case string: + b.Ident(r) + case Querier: + b.Join(r) + } + } +} + +func joinReturning(columns []string, b *Builder) { + if len(columns) == 0 || (!b.postgres() && !b.sqlite()) { + return + } + b.WriteString(" RETURNING ") + b.IdentComma(columns...) +} + +func (s *Selector) joinSelect(b *Builder) { + for i, sc := range s.selection { + if i > 0 { + b.Comma() + } + switch { + case sc.c != "": + b.Ident(sc.c) + case sc.x != nil: + b.Join(sc.x) + } + if sc.as != "" { + b.WriteString(" AS ") + b.Ident(sc.as) + } + } +} + +// implement the table view interface. +func (*Selector) view() {} + +// WithBuilder is the builder for the `WITH` statement. +type WithBuilder struct { + Builder + recursive bool + ctes []struct { + name string + columns []string + s *Selector + } +} + +// With returns a new builder for the `WITH` statement. +// +// n := Queries{ +// With("users_view").As(Select().From(Table("users"))), +// Select().From(Table("users_view")), +// } +// return n.Query() +func With(name string, columns ...string) *WithBuilder { + return &WithBuilder{ + ctes: []struct { + name string + columns []string + s *Selector + }{ + {name: name, columns: columns}, + }, + } +} + +// WithRecursive returns a new builder for the `WITH RECURSIVE` statement. +// +// n := Queries{ +// WithRecursive("users_view").As(Select().From(Table("users"))), +// Select().From(Table("users_view")), +// } +// return n.Query() +func WithRecursive(name string, columns ...string) *WithBuilder { + w := With(name, columns...) + w.recursive = true + return w +} + +// Name returns the name of the view. +func (w *WithBuilder) Name() string { + return w.ctes[0].name +} + +// As sets the view sub query. +func (w *WithBuilder) As(s *Selector) *WithBuilder { + w.ctes[len(w.ctes)-1].s = s + return w +} + +// With appends another named CTE to the statement. +func (w *WithBuilder) With(name string, columns ...string) *WithBuilder { + w.ctes = append(w.ctes, With(name, columns...).ctes...) 
+	return w
+}
+
+// C returns a formatted string for the WITH column.
+func (w *WithBuilder) C(column string) string {
+	b := &Builder{dialect: w.dialect}
+	b.Ident(w.Name()).WriteByte('.').Ident(column)
+	return b.String()
+}
+
+// Query returns query representation of a `WITH` clause.
+func (w *WithBuilder) Query() (string, []any) {
+	w.WriteString("WITH ")
+	if w.recursive {
+		w.WriteString("RECURSIVE ")
+	}
+	for i, cte := range w.ctes {
+		if i > 0 {
+			w.Comma()
+		}
+		w.Ident(cte.name)
+		if len(cte.columns) > 0 {
+			w.WriteByte('(')
+			w.IdentComma(cte.columns...)
+			w.WriteByte(')')
+		}
+		w.WriteString(" AS ")
+		w.Wrap(func(b *Builder) {
+			b.Join(cte.s)
+		})
+	}
+	return w.String(), w.args
+}
+
+// implement the table view interface.
+func (*WithBuilder) view() {}
+
+// WindowBuilder represents a builder for a window clause.
+// Note that window-function support is limited and is used
+// only to query rows-limited edges in pagination.
+type WindowBuilder struct {
+	Builder
+	fn        func(*Builder) // e.g. ROW_NUMBER(), RANK()
+	partition func(*Builder)
+	order     []any
+}
+
+// RowNumber returns a new window clause with the ROW_NUMBER() as a function.
+// Using this function will assign each row a number, from 1 to N, in the
+// order defined by the ORDER BY clause in the window spec.
+func RowNumber() *WindowBuilder {
+	return Window(func(b *Builder) {
+		b.WriteString("ROW_NUMBER()")
+	})
+}
+
+// Window returns a new window clause with a custom selector allowing
+// for custom window functions.
+//
+//	Window(func(b *Builder) {
+//		b.WriteString(Sum(posts.C("duration")))
+//	}).PartitionBy("author_id").OrderBy("id", "duration")
+func Window(fn func(*Builder)) *WindowBuilder {
+	return &WindowBuilder{fn: fn}
+}
+
+// PartitionBy indicates to divide the query rows into groups by the given columns.
+// Note that the standard SQL spec allows partitioning only by columns; to
+// partition by an expression, use PartitionExpr.
+func (w *WindowBuilder) PartitionBy(columns ...string) *WindowBuilder {
+	w.partition = func(b *Builder) {
+		b.IdentComma(columns...)
+	}
+	return w
+}
+
+// PartitionExpr indicates to divide the query rows into groups by the given expression.
+func (w *WindowBuilder) PartitionExpr(x Querier) *WindowBuilder {
+	w.partition = func(b *Builder) {
+		b.Join(x)
+	}
+	return w
+}
+
+// OrderBy indicates how to sort rows in each partition.
+func (w *WindowBuilder) OrderBy(columns ...string) *WindowBuilder {
+	for i := range columns {
+		w.order = append(w.order, columns[i])
+	}
+	return w
+}
+
+// OrderExpr appends the `ORDER BY` clause to the window
+// partition with custom list of expressions.
+func (w *WindowBuilder) OrderExpr(exprs ...Querier) *WindowBuilder {
+	for i := range exprs {
+		w.order = append(w.order, exprs[i])
+	}
+	return w
+}
+
+// Query returns query representation of the window function.
+func (w *WindowBuilder) Query() (string, []any) {
+	w.fn(&w.Builder)
+	w.WriteString(" OVER ")
+	w.Wrap(func(b *Builder) {
+		if w.partition != nil {
+			b.WriteString("PARTITION BY ")
+			w.partition(b)
+		}
+		joinOrder(w.order, b)
+	})
+	return w.Builder.String(), w.args
+}
+
+// Wrapper wraps a given Querier with different format.
+// Used to prefix/suffix other queries.
+type Wrapper struct {
+	format  string
+	wrapped Querier
+}
+
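A sketch of the window helpers above: ROW_NUMBER over a partition, rendered with the default quoting. The column names are illustrative:

package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	w := sql.RowNumber().PartitionBy("author_id").OrderBy("id")
	q, _ := w.Query()
	fmt.Println(q) // ROW_NUMBER() OVER (PARTITION BY `author_id` ORDER BY `id`)
}

+// Query returns query representation of a wrapped Querier.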
+func (w *Wrapper) Query() (string, []any) { + query, args := w.wrapped.Query() + return fmt.Sprintf(w.format, query), args +} + +// SetDialect calls SetDialect on the wrapped query. +func (w *Wrapper) SetDialect(name string) { + if s, ok := w.wrapped.(state); ok { + s.SetDialect(name) + } +} + +// Dialect calls Dialect on the wrapped query. +func (w *Wrapper) Dialect() string { + if s, ok := w.wrapped.(state); ok { + return s.Dialect() + } + return "" +} + +// Total returns the total number of arguments so far. +func (w *Wrapper) Total() int { + if s, ok := w.wrapped.(state); ok { + return s.Total() + } + return 0 +} + +// SetTotal sets the value of the total arguments. +// Used to pass this information between sub queries/expressions. +func (w *Wrapper) SetTotal(total int) { + if s, ok := w.wrapped.(state); ok { + s.SetTotal(total) + } +} + +// Raw returns a raw SQL query that is placed as-is in the query. +func Raw(s string) Querier { return &raw{s} } + +type raw struct{ s string } + +func (r *raw) Query() (string, []any) { return r.s, nil } + +// Expr returns an SQL expression that implements the Querier interface. +func Expr(exr string, args ...any) Querier { return &expr{s: exr, args: args} } + +type expr struct { + s string + args []any +} + +func (e *expr) Query() (string, []any) { return e.s, e.args } + +// ExprFunc returns an expression function that implements the Querier interface. +// +// Update("users"). +// Set("x", ExprFunc(func(b *Builder) { +// // The sql.Builder config (argc and dialect) +// // was set before the function was executed. +// b.Ident("x").WriteOp(OpAdd).Arg(1) +// })) +func ExprFunc(fn func(*Builder)) Querier { + return &exprFunc{fn: fn} +} + +type exprFunc struct { + Builder + fn func(*Builder) +} + +func (e *exprFunc) Query() (string, []any) { + b := e.Builder.clone() + e.fn(&b) + return b.Query() +} + +// Queries are list of queries join with space between them. +type Queries []Querier + +// Query returns query representation of Queriers. +func (n Queries) Query() (string, []any) { + b := &Builder{} + for i := range n { + if i > 0 { + b.Pad() + } + query, args := n[i].Query() + b.WriteString(query) + b.args = append(b.args, args...) + } + return b.String(), b.args +} + +// Builder is the base query builder for the sql dsl. +type Builder struct { + sb *strings.Builder // underlying builder. + dialect string // configured dialect. + args []any // query parameters. + total int // total number of parameters in query tree. + errs []error // errors that added during the query construction. + qualifier string // qualifier to prefix identifiers (e.g. table name). +} + +// Quote quotes the given identifier with the characters based +// on the configured dialect. It defaults to "`". +func (b *Builder) Quote(ident string) string { + quote := "`" + switch { + case b.postgres(): + // If it was quoted with the wrong + // identifier character. + if strings.Contains(ident, "`") { + return strings.ReplaceAll(ident, "`", `"`) + } + quote = `"` + // An identifier for unknown dialect. + case b.dialect == "" && strings.ContainsAny(ident, "`\""): + return ident + } + return quote + ident + quote +} + +// Ident appends the given string as an identifier. 
+func (b *Builder) Ident(s string) *Builder {
+	switch {
+	case len(s) == 0:
+	case !strings.HasSuffix(s, "*") && !b.isIdent(s) && !isFunc(s) && !isModifier(s) && !isAlias(s):
+		if b.qualifier != "" {
+			b.WriteString(b.Quote(b.qualifier)).WriteByte('.')
+		}
+		b.WriteString(b.Quote(s))
+	case (isFunc(s) || isModifier(s) || isAlias(s)) && b.postgres():
+		// Modifiers and aggregation functions that
+		// were called without dialect information.
+		b.WriteString(strings.ReplaceAll(s, "`", `"`))
+	default:
+		b.WriteString(s)
+	}
+	return b
+}
+
+// IdentComma calls Ident on all arguments and adds a comma between them.
+func (b *Builder) IdentComma(s ...string) *Builder {
+	for i := range s {
+		if i > 0 {
+			b.Comma()
+		}
+		b.Ident(s[i])
+	}
+	return b
+}
+
+// String returns the accumulated string.
+func (b *Builder) String() string {
+	if b.sb == nil {
+		return ""
+	}
+	return b.sb.String()
+}
+
+// WriteByte wraps the Buffer.WriteByte to make it chainable with other methods.
+func (b *Builder) WriteByte(c byte) *Builder {
+	if b.sb == nil {
+		b.sb = &strings.Builder{}
+	}
+	b.sb.WriteByte(c)
+	return b
+}
+
+// WriteString wraps the Buffer.WriteString to make it chainable with other methods.
+func (b *Builder) WriteString(s string) *Builder {
+	if b.sb == nil {
+		b.sb = &strings.Builder{}
+	}
+	b.sb.WriteString(s)
+	return b
+}
+
+// S is a short version of WriteString.
+func (b *Builder) S(s string) *Builder {
+	return b.WriteString(s)
+}
+
+// Len returns the number of accumulated bytes.
+func (b *Builder) Len() int {
+	if b.sb == nil {
+		return 0
+	}
+	return b.sb.Len()
+}
+
+// Reset resets the Builder to be empty.
+func (b *Builder) Reset() *Builder {
+	if b.sb != nil {
+		b.sb.Reset()
+	}
+	return b
+}
+
+// AddError appends an error to the builder errors.
+func (b *Builder) AddError(err error) *Builder {
+	// A nil error is allowed to make the build process easier.
+	if err != nil {
+		b.errs = append(b.errs, err)
+	}
+	return b
+}
+
+func (b *Builder) writeSchema(schema string) {
+	if schema != "" && b.dialect != dialect.SQLite {
+		b.Ident(schema).WriteByte('.')
+	}
+}
+
+// Err returns a concatenated error of all errors encountered during
+// the query building, or that were added manually by calling AddError.
+func (b *Builder) Err() error {
+	if len(b.errs) == 0 {
+		return nil
+	}
+	br := strings.Builder{}
+	for i := range b.errs {
+		if i > 0 {
+			br.WriteString("; ")
+		}
+		br.WriteString(b.errs[i].Error())
+	}
+	return errors.New(br.String())
+}
+
+// An Op represents an operator.
+type Op int
+
+// Predicate and arithmetic operators.
+const (
+	OpEQ      Op = iota // =
+	OpNEQ               // <>
+	OpGT                // >
+	OpGTE               // >=
+	OpLT                // <
+	OpLTE               // <=
+	OpIn                // IN
+	OpNotIn             // NOT IN
+	OpLike              // LIKE
+	OpIsNull            // IS NULL
+	OpNotNull           // IS NOT NULL
+	OpAdd               // +
+	OpSub               // -
+	OpMul               // *
+	OpDiv               // / (Quotient)
+	OpMod               // % (Remainder)
+)
+
+var ops = [...]string{
+	OpEQ:      "=",
+	OpNEQ:     "<>",
+	OpGT:      ">",
+	OpGTE:     ">=",
+	OpLT:      "<",
+	OpLTE:     "<=",
+	OpIn:      "IN",
+	OpNotIn:   "NOT IN",
+	OpLike:    "LIKE",
+	OpIsNull:  "IS NULL",
+	OpNotNull: "IS NOT NULL",
+	OpAdd:     "+",
+	OpSub:     "-",
+	OpMul:     "*",
+	OpDiv:     "/",
+	OpMod:     "%",
+}
+
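The exported Op constants and the ops table above make it possible to write custom expressions through the raw Builder callback. A minimal sketch with an invented column and values:

package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	// "age + 1 >= 21" written with explicit operators.
	p := sql.P(func(b *sql.Builder) {
		b.Ident("age").WriteOp(sql.OpAdd).Arg(1)
		b.WriteOp(sql.OpGTE).Arg(21)
	})
	q, args := p.Query()
	fmt.Println(q, args) // `age` + ? >= ? [1 21]
}

+// WriteOp writes an operator to the builder.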
+func (b *Builder) WriteOp(op Op) *Builder {
+	switch {
+	case op >= OpEQ && op <= OpLike || op >= OpAdd && op <= OpMod:
+		b.Pad().WriteString(ops[op]).Pad()
+	case op == OpIsNull || op == OpNotNull:
+		b.Pad().WriteString(ops[op])
+	default:
+		panic(fmt.Sprintf("invalid op %d", op))
+	}
+	return b
+}
+
+type (
+	// StmtInfo holds information regarding
+	// the statement.
+	StmtInfo struct {
+		// The Dialect of the SQL driver.
+		Dialect string
+	}
+	// ParamFormatter wraps the FormatParam function.
+	ParamFormatter interface {
+		// The FormatParam function lets users define
+		// custom placeholder formatting for their types.
+		// For example, formatting the default placeholder
+		// from '?' to 'ST_GeomFromWKB(?)' for MySQL dialect.
+		FormatParam(placeholder string, info *StmtInfo) string
+	}
+)
+
+// Arg appends an input argument to the builder.
+func (b *Builder) Arg(a any) *Builder {
+	switch v := a.(type) {
+	case nil:
+		b.WriteString("NULL")
+		return b
+	case *raw:
+		b.WriteString(v.s)
+		return b
+	case Querier:
+		b.Join(v)
+		return b
+	}
+	// Default placeholder param (MySQL and SQLite).
+	format := "?"
+	if b.postgres() {
+		// Postgres' arguments are referenced using the syntax $n.
+		// $1 refers to the 1st argument, $2 to the 2nd, and so on.
+		format = "$" + strconv.Itoa(b.total+1)
+	}
+	if f, ok := a.(ParamFormatter); ok {
+		format = f.FormatParam(format, &StmtInfo{
+			Dialect: b.dialect,
+		})
+	}
+	return b.Argf(format, a)
+}
+
+// Args appends a list of arguments to the builder.
+func (b *Builder) Args(a ...any) *Builder {
+	for i := range a {
+		if i > 0 {
+			b.Comma()
+		}
+		b.Arg(a[i])
+	}
+	return b
+}
+
+// Argf appends an input argument to the builder
+// with the given format. For example:
+//
+//	Argf("JSON(?)", data).
+//	Argf("ST_GeomFromText(?)", geom)
+func (b *Builder) Argf(format string, a any) *Builder {
+	switch a := a.(type) {
+	case nil:
+		b.WriteString("NULL")
+		return b
+	case *raw:
+		b.WriteString(a.s)
+		return b
+	case Querier:
+		b.Join(a)
+		return b
+	}
+	b.total++
+	b.args = append(b.args, a)
+	b.WriteString(format)
+	return b
+}
+
+// Comma adds a comma to the query.
+func (b *Builder) Comma() *Builder {
+	return b.WriteString(", ")
+}
+
+// Pad adds a space to the query.
+func (b *Builder) Pad() *Builder {
+	return b.WriteByte(' ')
+}
+
+// Join joins a list of Queries to the builder.
+func (b *Builder) Join(qs ...Querier) *Builder {
+	return b.join(qs, "")
+}
+
+// JoinComma joins a list of Queries and adds comma between them.
+func (b *Builder) JoinComma(qs ...Querier) *Builder {
+	return b.join(qs, ", ")
+}
+
+// join a list of Queries to the builder with a given separator.
+func (b *Builder) join(qs []Querier, sep string) *Builder {
+	for i, q := range qs {
+		if i > 0 {
+			b.WriteString(sep)
+		}
+		st, ok := q.(state)
+		if ok {
+			st.SetDialect(b.dialect)
+			st.SetTotal(b.total)
+		}
+		query, args := q.Query()
+		b.WriteString(query)
+		b.args = append(b.args, args...)
+		b.total += len(args)
+		if qe, ok := q.(querierErr); ok {
+			if err := qe.Err(); err != nil {
+				b.AddError(err)
+			}
+		}
+	}
+	return b
+}
+
+// Wrap gets a callback, and wraps its result with parentheses.
+func (b *Builder) Wrap(f func(*Builder)) *Builder {
+	nb := &Builder{dialect: b.dialect, total: b.total, sb: &strings.Builder{}}
+	nb.WriteByte('(')
+	f(nb)
+	nb.WriteByte(')')
+	b.WriteString(nb.String())
+	b.args = append(b.args, nb.args...)
+	b.total = nb.total
+	return b
+}
+
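Arg is where the dialect-specific placeholder is chosen: ? by default and $n on Postgres. A small sketch with an invented identifier and value:

package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	for _, d := range []string{dialect.MySQL, dialect.Postgres} {
		b := &sql.Builder{}
		b.SetDialect(d)
		b.Ident("name").WriteOp(sql.OpEQ).Arg("a8m")
		q, args := b.Query()
		fmt.Println(q, args)
	}
	// `name` = ? [a8m]
	// "name" = $1 [a8m]
}

+// Nested gets a callback, and wraps its result with parentheses.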
+//
+// Deprecated: Use Builder.Wrap instead.
+func (b *Builder) Nested(f func(*Builder)) *Builder {
+	return b.Wrap(f)
+}
+
+// SetDialect sets the builder dialect. It is used for generating dialect-specific queries.
+func (b *Builder) SetDialect(dialect string) {
+	b.dialect = dialect
+}
+
+// Dialect returns the dialect of the builder.
+func (b Builder) Dialect() string {
+	return b.dialect
+}
+
+// Total returns the total number of arguments so far.
+func (b Builder) Total() int {
+	return b.total
+}
+
+// SetTotal sets the value of the total arguments.
+// Used to pass this information between sub queries/expressions.
+func (b *Builder) SetTotal(total int) {
+	b.total = total
+}
+
+// Query implements the Querier interface.
+func (b Builder) Query() (string, []any) {
+	return b.String(), b.args
+}
+
+// clone returns a shallow clone of a builder.
+func (b Builder) clone() Builder {
+	c := Builder{dialect: b.dialect, total: b.total, sb: &strings.Builder{}}
+	if len(b.args) > 0 {
+		c.args = append(c.args, b.args...)
+	}
+	if b.sb != nil {
+		c.sb.WriteString(b.sb.String())
+	}
+	return c
+}
+
+// postgres reports if the builder dialect is PostgreSQL.
+func (b Builder) postgres() bool {
+	return b.Dialect() == dialect.Postgres
+}
+
+// sqlite reports if the builder dialect is SQLite.
+func (b Builder) sqlite() bool {
+	return b.Dialect() == dialect.SQLite
+}
+
+// fromIdent sets the builder dialect from the identifier format.
+func (b *Builder) fromIdent(ident string) {
+	if strings.Contains(ident, `"`) {
+		b.SetDialect(dialect.Postgres)
+	}
+	// otherwise, use the default.
+}
+
+// isIdent reports if the given string is a dialect identifier.
+func (b *Builder) isIdent(s string) bool {
+	switch {
+	case b.postgres():
+		return strings.Contains(s, `"`)
+	default:
+		return strings.Contains(s, "`")
+	}
+}
+
+// unquote database identifiers.
+func (b *Builder) unquote(s string) string {
+	switch pg := b.postgres(); {
+	case len(s) < 2:
+	case !pg && s[0] == '`' && s[len(s)-1] == '`', pg && s[0] == '"' && s[len(s)-1] == '"':
+		if u, err := strconv.Unquote(s); err == nil {
+			return u
+		}
+	}
+	return s
+}
+
+// isQualified reports if the given string is a qualified identifier.
+func (b *Builder) isQualified(s string) bool {
+	ident, pg := b.isIdent(s), b.postgres()
+	return !ident && len(s) > 2 && strings.ContainsRune(s[1:len(s)-1], '.') || // qualifier.column
+		ident && pg && strings.Contains(s, `"."`) || // "qualifier"."column"
+		ident && !pg && strings.Contains(s, "`.`") // `qualifier`.`column`
+}
+
+// state wraps all the methods for setting and getting
+// the build state shared between all queries in the query tree.
+type state interface {
+	Dialect() string
+	SetDialect(string)
+	Total() int
+	SetTotal(int)
+}
+
+// DialectBuilder prefixes all root builders with the `Dialect` constructor.
+type DialectBuilder struct {
+	dialect string
+}
+
+// Dialect creates a new DialectBuilder with the given dialect name.
+func Dialect(name string) *DialectBuilder {
+	return &DialectBuilder{name}
+}
+
+// String builds a dialect-aware expression string from the given callback.
+func (d *DialectBuilder) String(f func(*Builder)) string {
+	b := &Builder{}
+	b.SetDialect(d.dialect)
+	f(b)
+	return b.String()
+}
+
+// Expr builds a dialect-aware expression from the given callback.
+func (d *DialectBuilder) Expr(f func(*Builder)) Querier {
+	return Expr(d.String(f))
+}
+
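A sketch of DialectBuilder tying the pieces together: one configured dialect propagated to a CTE and its consumer, following the Queries pattern documented earlier. The names are illustrative:

package main

import (
	"fmt"

	"entgo.io/ent/dialect"
	"entgo.io/ent/dialect/sql"
)

func main() {
	d := sql.Dialect(dialect.Postgres)
	n := sql.Queries{
		d.With("users_view").As(d.Select().From(d.Table("users"))),
		d.Select("name").From(d.Table("users_view")),
	}
	q, args := n.Query()
	fmt.Println(q, args)
	// WITH "users_view" AS (SELECT * FROM "users") SELECT "name" FROM "users_view" []
}

+// Describe creates a DescribeBuilder for the configured dialect.
+//
+//	Dialect(dialect.Postgres).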
+// Describe("users") +func (d *DialectBuilder) Describe(name string) *DescribeBuilder { + b := Describe(name) + b.SetDialect(d.dialect) + return b +} + +// CreateTable creates a TableBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// CreateTable("users"). +// Columns( +// Column("id").Type("int").Attr("auto_increment"), +// Column("name").Type("varchar(255)"), +// ). +// PrimaryKey("id") +func (d *DialectBuilder) CreateTable(name string) *TableBuilder { + b := CreateTable(name) + b.SetDialect(d.dialect) + return b +} + +// AlterTable creates a TableAlter for the configured dialect. +// +// Dialect(dialect.Postgres). +// AlterTable("users"). +// AddColumn(Column("group_id").Type("int").Attr("UNIQUE")). +// AddForeignKey(ForeignKey().Columns("group_id"). +// Reference(Reference().Table("groups").Columns("id")). +// OnDelete("CASCADE"), +// ) +func (d *DialectBuilder) AlterTable(name string) *TableAlter { + b := AlterTable(name) + b.SetDialect(d.dialect) + return b +} + +// AlterIndex creates an IndexAlter for the configured dialect. +// +// Dialect(dialect.Postgres). +// AlterIndex("old"). +// Rename("new") +func (d *DialectBuilder) AlterIndex(name string) *IndexAlter { + b := AlterIndex(name) + b.SetDialect(d.dialect) + return b +} + +// Column creates a ColumnBuilder for the configured dialect. +// +// Dialect(dialect.Postgres).. +// Column("group_id").Type("int").Attr("UNIQUE") +func (d *DialectBuilder) Column(name string) *ColumnBuilder { + b := Column(name) + b.SetDialect(d.dialect) + return b +} + +// Insert creates a InsertBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Insert("users").Columns("age").Values(1) +func (d *DialectBuilder) Insert(table string) *InsertBuilder { + b := Insert(table) + b.SetDialect(d.dialect) + return b +} + +// Update creates a UpdateBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Update("users").Set("name", "foo") +func (d *DialectBuilder) Update(table string) *UpdateBuilder { + b := Update(table) + b.SetDialect(d.dialect) + return b +} + +// Delete creates a DeleteBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// Delete().From("users") +func (d *DialectBuilder) Delete(table string) *DeleteBuilder { + b := Delete(table) + b.SetDialect(d.dialect) + return b +} + +// Select creates a Selector for the configured dialect. +// +// Dialect(dialect.Postgres). +// Select().From(Table("users")) +func (d *DialectBuilder) Select(columns ...string) *Selector { + b := Select(columns...) + b.SetDialect(d.dialect) + return b +} + +// SelectExpr is like Select, but supports passing arbitrary +// expressions for SELECT clause. +// +// Dialect(dialect.Postgres). +// SelectExpr(expr...). +// From(Table("users")) +func (d *DialectBuilder) SelectExpr(exprs ...Querier) *Selector { + b := SelectExpr(exprs...) + b.SetDialect(d.dialect) + return b +} + +// Table creates a SelectTable for the configured dialect. +// +// Dialect(dialect.Postgres). +// Table("users").As("u") +func (d *DialectBuilder) Table(name string) *SelectTable { + b := Table(name) + b.SetDialect(d.dialect) + return b +} + +// With creates a WithBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// With("users_view"). +// As(Select().From(Table("users"))) +func (d *DialectBuilder) With(name string) *WithBuilder { + b := With(name) + b.SetDialect(d.dialect) + return b +} + +// CreateIndex creates a IndexBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). 
+// CreateIndex("unique_name"). +// Unique(). +// Table("users"). +// Columns("first", "last") +func (d *DialectBuilder) CreateIndex(name string) *IndexBuilder { + b := CreateIndex(name) + b.SetDialect(d.dialect) + return b +} + +// DropIndex creates a DropIndexBuilder for the configured dialect. +// +// Dialect(dialect.Postgres). +// DropIndex("name") +func (d *DialectBuilder) DropIndex(name string) *DropIndexBuilder { + b := DropIndex(name) + b.SetDialect(d.dialect) + return b +} + +func isAlias(s string) bool { + return strings.Contains(s, " AS ") || strings.Contains(s, " as ") +} + +func isFunc(s string) bool { + return strings.Contains(s, "(") && strings.Contains(s, ")") +} + +func isModifier(s string) bool { + for _, m := range [...]string{"DISTINCT", "ALL", "WITH ROLLUP"} { + if strings.HasPrefix(s, m) { + return true + } + } + return false +} diff --git a/vendor/entgo.io/ent/dialect/sql/driver.go b/vendor/entgo.io/ent/dialect/sql/driver.go new file mode 100644 index 00000000..9a52ffa4 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/driver.go @@ -0,0 +1,184 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sql + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "strings" + + "entgo.io/ent/dialect" +) + +// Driver is a dialect.Driver implementation for SQL based databases. +type Driver struct { + Conn + dialect string +} + +// NewDriver creates a new Driver with the given Conn and dialect. +func NewDriver(dialect string, c Conn) *Driver { + return &Driver{dialect: dialect, Conn: c} +} + +// Open wraps the database/sql.Open method and returns a dialect.Driver that implements the an ent/dialect.Driver interface. +func Open(dialect, source string) (*Driver, error) { + db, err := sql.Open(dialect, source) + if err != nil { + return nil, err + } + return NewDriver(dialect, Conn{db}), nil +} + +// OpenDB wraps the given database/sql.DB method with a Driver. +func OpenDB(dialect string, db *sql.DB) *Driver { + return NewDriver(dialect, Conn{db}) +} + +// DB returns the underlying *sql.DB instance. +func (d Driver) DB() *sql.DB { + return d.ExecQuerier.(*sql.DB) +} + +// Dialect implements the dialect.Dialect method. +func (d Driver) Dialect() string { + // If the underlying driver is wrapped with a telemetry driver. + for _, name := range []string{dialect.MySQL, dialect.SQLite, dialect.Postgres} { + if strings.HasPrefix(d.dialect, name) { + return name + } + } + return d.dialect +} + +// Tx starts and returns a transaction. +func (d *Driver) Tx(ctx context.Context) (dialect.Tx, error) { + return d.BeginTx(ctx, nil) +} + +// BeginTx starts a transaction with options. +func (d *Driver) BeginTx(ctx context.Context, opts *TxOptions) (dialect.Tx, error) { + tx, err := d.DB().BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{ + Conn: Conn{tx}, + Tx: tx, + }, nil +} + +// Close closes the underlying connection. +func (d *Driver) Close() error { return d.DB().Close() } + +// Tx implements dialect.Tx interface. +type Tx struct { + Conn + driver.Tx +} + +// ExecQuerier wraps the standard Exec and Query methods. +type ExecQuerier interface { + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) +} + +// Conn implements dialect.ExecQuerier given ExecQuerier. 
+type Conn struct {
+	ExecQuerier
+}
+
+// Exec implements the dialect.Exec method.
+func (c Conn) Exec(ctx context.Context, query string, args, v any) error {
+	argv, ok := args.([]any)
+	if !ok {
+		return fmt.Errorf("dialect/sql: invalid type %T. expect []any for args", args)
+	}
+	switch v := v.(type) {
+	case nil:
+		if _, err := c.ExecContext(ctx, query, argv...); err != nil {
+			return err
+		}
+	case *sql.Result:
+		res, err := c.ExecContext(ctx, query, argv...)
+		if err != nil {
+			return err
+		}
+		*v = res
+	default:
+		return fmt.Errorf("dialect/sql: invalid type %T. expect *sql.Result", v)
+	}
+	return nil
+}
+
+// Query implements the dialect.Query method.
+func (c Conn) Query(ctx context.Context, query string, args, v any) error {
+	vr, ok := v.(*Rows)
+	if !ok {
+		return fmt.Errorf("dialect/sql: invalid type %T. expect *sql.Rows", v)
+	}
+	argv, ok := args.([]any)
+	if !ok {
+		return fmt.Errorf("dialect/sql: invalid type %T. expect []any for args", args)
+	}
+	rows, err := c.QueryContext(ctx, query, argv...)
+	if err != nil {
+		return err
+	}
+	*vr = Rows{rows}
+	return nil
+}
+
+var _ dialect.Driver = (*Driver)(nil)
+
+type (
+	// Rows wraps sql.Rows to avoid copying the lock it holds.
+	Rows struct{ ColumnScanner }
+	// Result is an alias to sql.Result.
+	Result = sql.Result
+	// NullBool is an alias to sql.NullBool.
+	NullBool = sql.NullBool
+	// NullInt64 is an alias to sql.NullInt64.
+	NullInt64 = sql.NullInt64
+	// NullString is an alias to sql.NullString.
+	NullString = sql.NullString
+	// NullFloat64 is an alias to sql.NullFloat64.
+	NullFloat64 = sql.NullFloat64
+	// NullTime represents a time.Time that may be null.
+	NullTime = sql.NullTime
+	// TxOptions holds the transaction options to be used in DB.BeginTx.
+	TxOptions = sql.TxOptions
+)
+
+// NullScanner represents an sql.Scanner that may be null.
+// NullScanner implements the sql.Scanner interface so it can
+// be used as a scan destination, similar to the types above.
+type NullScanner struct {
+	S     sql.Scanner
+	Valid bool // Valid is true if the Scan value is not NULL.
+}
+
+// Scan implements the Scanner interface.
+func (n *NullScanner) Scan(value any) error {
+	n.Valid = value != nil
+	if n.Valid {
+		return n.S.Scan(value)
+	}
+	return nil
+}
+
+// ColumnScanner is the interface that wraps the standard
+// sql.Rows methods used for scanning database rows.
+type ColumnScanner interface {
+	Close() error
+	ColumnTypes() ([]*sql.ColumnType, error)
+	Columns() ([]string, error)
+	Err() error
+	Next() bool
+	NextResultSet() bool
+	Scan(dest ...any) error
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/scan.go b/vendor/entgo.io/ent/dialect/sql/scan.go
new file mode 100644
index 00000000..581007dd
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/scan.go
@@ -0,0 +1,420 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sql
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// ScanOne scans one row into the given value. It fails if rows holds more than one row.
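+//
+// A short illustrative sketch (rows is assumed to hold a single-column result):
+//
+//	var name string
+//	if err := ScanOne(rows, &name); err != nil {
+//		log.Fatal(err)
+//	}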
+func ScanOne(rows ColumnScanner, v any) error { + columns, err := rows.Columns() + if err != nil { + return fmt.Errorf("sql/scan: failed getting column names: %w", err) + } + if n := len(columns); n != 1 { + return fmt.Errorf("sql/scan: unexpected number of columns: %d", n) + } + if !rows.Next() { + if err := rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + if err := rows.Scan(v); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("sql/scan: expect exactly one row in result set") + } + return rows.Err() +} + +// ScanInt64 scans and returns an int64 from the rows. +func ScanInt64(rows ColumnScanner) (int64, error) { + var n int64 + if err := ScanOne(rows, &n); err != nil { + return 0, err + } + return n, nil +} + +// ScanInt scans and returns an int from the rows. +func ScanInt(rows ColumnScanner) (int, error) { + n, err := ScanInt64(rows) + if err != nil { + return 0, err + } + return int(n), nil +} + +// ScanBool scans and returns a boolean from the rows. +func ScanBool(rows ColumnScanner) (bool, error) { + var b bool + if err := ScanOne(rows, &b); err != nil { + return false, err + } + return b, nil +} + +// ScanString scans and returns a string from the rows. +func ScanString(rows ColumnScanner) (string, error) { + var s string + if err := ScanOne(rows, &s); err != nil { + return "", err + } + return s, nil +} + +// ScanValue scans and returns a driver.Value from the rows. +func ScanValue(rows ColumnScanner) (driver.Value, error) { + var v driver.Value + if err := ScanOne(rows, &v); err != nil { + return "", err + } + return v, nil +} + +// ScanSlice scans the given ColumnScanner (basically, sql.Row or sql.Rows) into the given slice. +func ScanSlice(rows ColumnScanner, v any) error { + columns, err := rows.Columns() + if err != nil { + return fmt.Errorf("sql/scan: failed getting column names: %w", err) + } + rv := reflect.ValueOf(v) + switch { + case rv.Kind() != reflect.Ptr: + if t := reflect.TypeOf(v); t != nil { + return fmt.Errorf("sql/scan: ScanSlice(non-pointer %s)", t) + } + fallthrough + case rv.IsNil(): + return fmt.Errorf("sql/scan: ScanSlice(nil)") + } + rv = reflect.Indirect(rv) + if k := rv.Kind(); k != reflect.Slice { + return fmt.Errorf("sql/scan: invalid type %s. expected slice as an argument", k) + } + scan, err := scanType(rv.Type().Elem(), columns) + if err != nil { + return err + } + if n, m := len(columns), len(scan.columns); n > m { + return fmt.Errorf("sql/scan: columns do not match (%d > %d)", n, m) + } + for rows.Next() { + values := scan.values() + if err := rows.Scan(values...); err != nil { + return fmt.Errorf("sql/scan: failed scanning rows: %w", err) + } + vv, err := scan.value(values...) + if err != nil { + return err + } + rv.Set(reflect.Append(rv, vv)) + } + return rows.Err() +} + +// rowScan is the configuration for scanning one sql.Row. +type rowScan struct { + // column types of a row. + columns []reflect.Type + // value functions that converts the row columns (result) to a reflect.Value. + value func(v ...any) (reflect.Value, error) +} + +// values returns a []any from the configured column types. +func (r *rowScan) values() []any { + values := make([]any, len(r.columns)) + for i := range r.columns { + values[i] = reflect.New(r.columns[i]).Interface() + } + return values +} + +// scanType returns rowScan for the given reflect.Type. 
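+// Directly assignable types are scanned as-is; pointer and struct
+// types are delegated to scanPtr and scanStruct below.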
+func scanType(typ reflect.Type, columns []string) (*rowScan, error) {
+	switch k := typ.Kind(); {
+	case assignable(typ):
+		return &rowScan{
+			columns: []reflect.Type{typ},
+			value: func(v ...any) (reflect.Value, error) {
+				return reflect.Indirect(reflect.ValueOf(v[0])), nil
+			},
+		}, nil
+	case k == reflect.Ptr:
+		return scanPtr(typ, columns)
+	case k == reflect.Struct:
+		return scanStruct(typ, columns)
+	default:
+		return nil, fmt.Errorf("sql/scan: unsupported type ([]%s)", k)
+	}
+}
+
+var (
+	scannerType  = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+	nullJSONType = reflect.TypeOf((*nullJSON)(nil)).Elem()
+)
+
+// nullJSON represents a json.RawMessage that may be NULL.
+type nullJSON json.RawMessage
+
+// Scan implements the sql.Scanner interface.
+func (j *nullJSON) Scan(v interface{}) error {
+	if v == nil {
+		return nil
+	}
+	*j = v.([]byte)
+	return nil
+}
+
+// assignable reports if the given type can be assigned directly by `Rows.Scan`.
+func assignable(typ reflect.Type) bool {
+	switch k := typ.Kind(); {
+	case typ.Implements(scannerType):
+	case k == reflect.Interface && typ.NumMethod() == 0:
+	case k == reflect.String || k >= reflect.Bool && k <= reflect.Float64:
+	case (k == reflect.Slice || k == reflect.Array) && typ.Elem().Kind() == reflect.Uint8:
+	default:
+		return false
+	}
+	return true
+}
+
+// scanStruct returns the configuration for scanning a sql.Row into a struct.
+func scanStruct(typ reflect.Type, columns []string) (*rowScan, error) {
+	var (
+		scan  = &rowScan{}
+		idxs  = make([][]int, 0, typ.NumField())
+		names = make(map[string][]int, typ.NumField())
+	)
+	for i := 0; i < typ.NumField(); i++ {
+		f := typ.Field(i)
+		// Skip unexported fields.
+		if f.PkgPath != "" {
+			continue
+		}
+		// Support 1-level embedding to accept types such as `type T struct {ent.T; V int}`.
+		if typ := f.Type; f.Anonymous && typ.Kind() == reflect.Struct {
+			for j := 0; j < typ.NumField(); j++ {
+				names[columnName(typ.Field(j))] = []int{i, j}
+			}
+			continue
+		}
+		names[columnName(f)] = []int{i}
+	}
+	for _, c := range columns {
+		var idx []int
+		// Normalize columns if necessary,
+		// for example: COUNT(*) => count.
+		switch name := strings.Split(c, "(")[0]; {
+		case names[name] != nil:
+			idx = names[name]
+		case names[strings.ToLower(name)] != nil:
+			idx = names[strings.ToLower(name)]
+		default:
+			return nil, fmt.Errorf("sql/scan: missing struct field for column: %s (%s)", c, name)
+		}
+		idxs = append(idxs, idx)
+		rtype := typ.Field(idx[0]).Type
+		if len(idx) > 1 {
+			rtype = rtype.Field(idx[1]).Type
+		}
+		switch {
+		// If the field is not supported by the standard
+		// convertAssign, assume it is a JSON field.
+		case !supportsScan(rtype):
+			rtype = nullJSONType
+		// Create a pointer to the actual reflect
+		// types to accept optional struct fields.
+		case !nillable(rtype):
+			rtype = reflect.PtrTo(rtype)
+		}
+		scan.columns = append(scan.columns, rtype)
+	}
+	scan.value = func(vs ...any) (reflect.Value, error) {
+		st := reflect.New(typ).Elem()
+		for i, v := range vs {
+			rv := reflect.Indirect(reflect.ValueOf(v))
+			if rv.IsNil() {
+				continue
+			}
+			idx := idxs[i]
+			rvalue, ft := st.Field(idx[0]), st.Type().Field(idx[0])
+			if len(idx) > 1 {
+				// Embedded field.
+ rvalue, ft = rvalue.Field(idx[1]), ft.Type.Field(idx[1]) + } + switch { + case rv.Type() == nullJSONType: + if rv = reflect.Indirect(rv); rv.IsNil() { + continue + } + if err := json.Unmarshal(rv.Bytes(), rvalue.Addr().Interface()); err != nil { + return reflect.Value{}, fmt.Errorf("unmarshal field %q: %w", ft.Name, err) + } + case !nillable(rvalue.Type()): + rv = reflect.Indirect(rv) + fallthrough + default: + rvalue.Set(rv) + } + } + return st, nil + } + return scan, nil +} + +// columnName returns the column name of a struct-field. +func columnName(f reflect.StructField) string { + name := strings.ToLower(f.Name) + if tag, ok := f.Tag.Lookup("sql"); ok { + name = tag + } else if tag, ok := f.Tag.Lookup("json"); ok { + name = strings.Split(tag, ",")[0] + } + return name +} + +// nillable reports if the reflect-type can have nil value. +func nillable(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Slice, reflect.Map, reflect.Ptr, reflect.UnsafePointer: + return true + } + return false +} + +// scanPtr wraps the underlying type with rowScan. +func scanPtr(typ reflect.Type, columns []string) (*rowScan, error) { + typ = typ.Elem() + scan, err := scanType(typ, columns) + if err != nil { + return nil, err + } + wrap := scan.value + scan.value = func(vs ...any) (reflect.Value, error) { + v, err := wrap(vs...) + if err != nil { + return reflect.Value{}, err + } + pt := reflect.PtrTo(v.Type()) + pv := reflect.New(pt.Elem()) + pv.Elem().Set(v) + return pv, nil + } + return scan, nil +} + +func supportsScan(t reflect.Type) bool { + if t.Implements(scannerType) || reflect.PtrTo(t).Implements(scannerType) { + return true + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + switch t.Kind() { + case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.Pointer, reflect.String: + return true + case reflect.Slice: + return t == reflect.TypeOf(sql.RawBytes(nil)) || t == reflect.TypeOf([]byte(nil)) + case reflect.Interface: + return t == reflect.TypeOf((*any)(nil)).Elem() + default: + return t == reflect.TypeOf(time.Time{}) || t.Implements(scannerType) + } +} + +// UnknownType is a named type to any indicates the info +// needs to be extracted from the underlying rows. +type UnknownType any + +// ScanTypeOf returns the type used for scanning column i from the database. +func ScanTypeOf(rows *Rows, i int) any { + unknown := new(any) + ct, err := rows.ColumnTypes() + if err != nil || len(ct) <= i { + return unknown + } + rt := ct[i].ScanType() + if rt.Kind() == reflect.Pointer { + rt = rt.Elem() + } + // Handle NULL values. + switch rt.Kind() { + case reflect.Bool: + rt = reflect.TypeOf(sql.NullBool{}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + rt = reflect.TypeOf(sql.NullInt64{}) + case reflect.Float32, reflect.Float64: + rt = reflect.TypeOf(sql.NullFloat64{}) + case reflect.String: + rt = reflect.TypeOf(sql.NullString{}) + } + return reflect.New(rt).Interface() +} + +// SelectValues maps a selected column to its value. +// Used by the generated code for storing runtime selected columns/expressions. +type SelectValues map[string]any + +// Set sets the value of the given column. 
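+//
+// A minimal usage sketch (the column name is illustrative):
+//
+//	var vs SelectValues
+//	vs.Set("name", "a8m") // the underlying map is allocated lazily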
+func (s *SelectValues) Set(name string, v any) { + if *s == nil { + *s = make(SelectValues) + } + if pv, ok := v.(*any); ok && pv != nil { + v = *pv + } + (*s)[name] = v +} + +// Get returns the value of the given column. +func (s SelectValues) Get(name string) (any, error) { + v, ok := s[name] + if !ok { + return nil, fmt.Errorf("%s value was not selected", name) + } + if v == nil { + return nil, nil + } + switch rv := reflect.Indirect(reflect.ValueOf(v)).Interface().(type) { + case NullString: + if rv.Valid { + return rv.String, nil + } + case NullInt64: + if rv.Valid { + return rv.Int64, nil + } + case NullFloat64: + if rv.Valid { + return rv.Float64, nil + } + case NullBool: + if rv.Valid { + return rv.Bool, nil + } + case NullTime: + if rv.Valid { + return rv.Time, nil + } + case sql.RawBytes: + return []byte(rv), nil + default: + return rv, nil + } + return nil, nil +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/BUILD b/vendor/entgo.io/ent/dialect/sql/schema/BUILD new file mode 100644 index 00000000..5b2c0792 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "schema", + srcs = [ + "atlas.go", + "inspect.go", + "migrate.go", + "mysql.go", + "postgres.go", + "schema.go", + "sqlite.go", + "writer.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect/sql/schema", + importpath = "entgo.io/ent/dialect/sql/schema", + visibility = ["//visibility:public"], + deps = [ + "//vendor/ariga.io/atlas/sql/migrate", + "//vendor/ariga.io/atlas/sql/mysql", + "//vendor/ariga.io/atlas/sql/postgres", + "//vendor/ariga.io/atlas/sql/schema", + "//vendor/ariga.io/atlas/sql/sqlclient", + "//vendor/ariga.io/atlas/sql/sqlite", + "//vendor/ariga.io/atlas/sql/sqltool", + "//vendor/entgo.io/ent/dialect", + "//vendor/entgo.io/ent/dialect/entsql", + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/entgo.io/ent/schema/field", + ], +) diff --git a/vendor/entgo.io/ent/dialect/sql/schema/atlas.go b/vendor/entgo.io/ent/dialect/sql/schema/atlas.go new file mode 100644 index 00000000..e2870dcd --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/atlas.go @@ -0,0 +1,1202 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +import ( + "context" + "crypto/md5" + "database/sql" + "errors" + "fmt" + "net/url" + "reflect" + "sort" + "strings" + + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlclient" + "ariga.io/atlas/sql/sqltool" + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" +) + +// Atlas atlas migration engine. 
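+// It implements both online migrations (Create) and the generation of
+// versioned migration files (Diff/NamedDiff). A hedged usage sketch,
+// with drv, ctx and tables assumed to exist:
+//
+//	m, err := NewMigrate(drv, WithDropColumn(true), WithDropIndex(true))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := m.Create(ctx, tables...); err != nil {
+//		log.Fatal(err)
+//	}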
+type Atlas struct {
+	atDriver   migrate.Driver
+	sqlDialect sqlDialect
+
+	legacy      bool // whether the legacy migration engine should be used instead of Atlas
+	withFixture bool // deprecated: with fks rename fixture
+	sum         bool // deprecated: sum file generation will be required
+
+	indent          string            // plan indentation
+	errNoPlan       bool              // no plan error enabled
+	universalID     bool              // global unique ids
+	dropColumns     bool              // drop deleted columns
+	dropIndexes     bool              // drop deleted indexes
+	withForeignKeys bool              // with foreign keys
+	mode            Mode
+	hooks           []Hook            // hooks to apply before creation
+	diffHooks       []DiffHook        // diff hooks to run when diffing current and desired
+	applyHook       []ApplyHook       // apply hooks to run when applying the plan
+	skip            ChangeKind        // what changes to skip and not apply
+	dir             migrate.Dir       // the migration directory to read from
+	fmt             migrate.Formatter // how to format the plan into migration files
+
+	driver  dialect.Driver // driver passed in when not using an atlas URL
+	url     *url.URL       // url of database connection
+	dialect string         // Ent dialect to use when generating migration files
+
+	types []string // pre-existing pk range allocation for global unique id
+}
+
+// Diff compares the state read from a database connection or migration directory with the state defined by the Ent
+// schema. Changes will be written to new migration files.
+func Diff(ctx context.Context, u, name string, tables []*Table, opts ...MigrateOption) (err error) {
+	m, err := NewMigrateURL(u, opts...)
+	if err != nil {
+		return err
+	}
+	return m.NamedDiff(ctx, name, tables...)
+}
+
+// NewMigrate creates a new Atlas from the given dialect.Driver.
+func NewMigrate(drv dialect.Driver, opts ...MigrateOption) (*Atlas, error) {
+	a := &Atlas{driver: drv, withForeignKeys: true, mode: ModeInspect, sum: true}
+	for _, opt := range opts {
+		opt(a)
+	}
+	a.dialect = a.driver.Dialect()
+	if err := a.init(); err != nil {
+		return nil, err
+	}
+	return a, nil
+}
+
+// NewMigrateURL creates a new Atlas from the given url.
+func NewMigrateURL(u string, opts ...MigrateOption) (*Atlas, error) {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		return nil, err
+	}
+	a := &Atlas{url: parsed, withForeignKeys: true, mode: ModeInspect, sum: true}
+	for _, opt := range opts {
+		opt(a)
+	}
+	if a.dialect == "" {
+		a.dialect = parsed.Scheme
+	}
+	if err := a.init(); err != nil {
+		return nil, err
+	}
+	return a, nil
+}
+
+// Create creates all schema resources in the database. It works in an "append-only"
+// mode, which means it only creates tables, appends columns to tables, or modifies column types.
+//
+// A column can be modified by turning it from NOT NULL into NULL, or by a type conversion that
+// does not alter data. For example, changing varchar(255) to varchar(120) is invalid, but
+// changing varchar(120) to varchar(255) is valid. For more info, see the convert function below.
+func (a *Atlas) Create(ctx context.Context, tables ...*Table) (err error) {
+	a.setupTables(tables)
+	var creator Creator = CreateFunc(a.create)
+	if a.legacy {
+		m, err := a.legacyMigrate()
+		if err != nil {
+			return err
+		}
+		creator = CreateFunc(m.create)
+	}
+	for i := len(a.hooks) - 1; i >= 0; i-- {
+		creator = a.hooks[i](creator)
+	}
+	return creator.Create(ctx, tables...)
+}
+
+// Diff compares the state read from the connected database with the state defined by Ent.
+// Changes will be written to migration files by the configured Planner.
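+//
+// A hedged sketch of a versioned-migration setup (the directory path is
+// illustrative):
+//
+//	dir, err := migrate.NewLocalDir("ent/migrate/migrations")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	m, err := NewMigrate(drv, WithDir(dir))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := m.Diff(ctx, tables...); err != nil {
+//		log.Fatal(err)
+//	}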
+func (a *Atlas) Diff(ctx context.Context, tables ...*Table) error {
+	return a.NamedDiff(ctx, "changes", tables...)
+}
+
+// NamedDiff compares the state read from the connected database with the state defined by Ent.
+// Changes will be written to migration files by the configured Planner.
+func (a *Atlas) NamedDiff(ctx context.Context, name string, tables ...*Table) error {
+	if a.dir == nil {
+		return errors.New("no migration directory given")
+	}
+	opts := []migrate.PlannerOption{migrate.WithFormatter(a.fmt)}
+	if a.sum {
+		// Validate the migration directory before proceeding.
+		if err := migrate.Validate(a.dir); err != nil {
+			return fmt.Errorf("validating migration directory: %w", err)
+		}
+	} else {
+		opts = append(opts, migrate.DisableChecksum())
+	}
+	a.setupTables(tables)
+	// Set up connections.
+	if a.driver != nil {
+		var err error
+		a.sqlDialect, err = a.entDialect(ctx, a.driver)
+		if err != nil {
+			return err
+		}
+		a.atDriver, err = a.sqlDialect.atOpen(a.sqlDialect)
+		if err != nil {
+			return err
+		}
+	} else {
+		c, err := sqlclient.OpenURL(ctx, a.url)
+		if err != nil {
+			return err
+		}
+		defer c.Close()
+		a.sqlDialect, err = a.entDialect(ctx, entsql.OpenDB(a.dialect, c.DB))
+		if err != nil {
+			return err
+		}
+		a.atDriver = c.Driver
+	}
+	defer func() {
+		a.sqlDialect = nil
+		a.atDriver = nil
+	}()
+	if err := a.sqlDialect.init(ctx); err != nil {
+		return err
+	}
+	if a.universalID {
+		tables = append(tables, NewTypesTable())
+	}
+	var (
+		err  error
+		plan *migrate.Plan
+	)
+	switch a.mode {
+	case ModeInspect:
+		plan, err = a.planInspect(ctx, a.sqlDialect, name, tables)
+	case ModeReplay:
+		plan, err = a.planReplay(ctx, name, tables)
+	default:
+		return fmt.Errorf("unknown migration mode: %q", a.mode)
+	}
+	switch {
+	case err != nil:
+		return err
+	case len(plan.Changes) == 0:
+		if a.errNoPlan {
+			return migrate.ErrNoPlan
+		}
+		return nil
+	default:
+		return migrate.NewPlanner(nil, a.dir, opts...).WritePlan(plan)
+	}
+}
+
+func (a *Atlas) cleanSchema(ctx context.Context, name string, err0 error) (err error) {
+	defer func() {
+		if err0 != nil {
+			err = fmt.Errorf("%v: %w", err0, err)
+		}
+	}()
+	s, err := a.atDriver.InspectSchema(ctx, name, nil)
+	if err != nil {
+		return err
+	}
+	drop := make([]schema.Change, len(s.Tables))
+	for i, t := range s.Tables {
+		drop[i] = &schema.DropTable{T: t}
+	}
+	return a.atDriver.ApplyChanges(ctx, drop)
+}
+
+// VerifyTableRange ensures that the defined autoincrement starting value is set for each table as defined by the
+// TypeTable. This is necessary for MySQL versions < 8.0. In those versions, the defined starting value for
+// AUTO_INCREMENT columns was stored in memory, and if a server restart happened while a table had no rows yet,
+// the defined starting value was lost, resulting in incorrect behavior when working with global unique ids.
+// Calling this method on service start ensures the information is correct and sets it again if it is not.
+// For MySQL versions > 8, calling this method is only required once after the upgrade.
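+//
+// A minimal sketch, assuming m and tables were created as in the examples above:
+//
+//	if err := m.VerifyTableRange(ctx, tables); err != nil {
+//		log.Fatal(err)
+//	}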
+func (a *Atlas) VerifyTableRange(ctx context.Context, tables []*Table) error {
+	if a.driver != nil {
+		var err error
+		a.sqlDialect, err = a.entDialect(ctx, a.driver)
+		if err != nil {
+			return err
+		}
+	} else {
+		c, err := sqlclient.OpenURL(ctx, a.url)
+		if err != nil {
+			return err
+		}
+		defer c.Close()
+		a.sqlDialect, err = a.entDialect(ctx, entsql.OpenDB(a.dialect, c.DB))
+		if err != nil {
+			return err
+		}
+	}
+	defer func() {
+		a.sqlDialect = nil
+	}()
+	vr, ok := a.sqlDialect.(verifyRanger)
+	if !ok {
+		return nil
+	}
+	types, err := a.loadTypes(ctx, a.sqlDialect)
+	if err != nil {
+		// In most cases this means the table does not exist, which in turn
+		// indicates the user does not use global unique ids.
+		return err
+	}
+	for _, t := range tables {
+		id := indexOf(types, t.Name)
+		if id == -1 {
+			continue
+		}
+		if err := vr.verifyRange(ctx, a.sqlDialect, t, int64(id<<32)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type (
+	// Differ is the interface that wraps the Diff method.
+	Differ interface {
+		// Diff returns a list of changes that construct a migration plan.
+		Diff(current, desired *schema.Schema) ([]schema.Change, error)
+	}
+
+	// The DiffFunc type is an adapter to allow the use of an ordinary function as a Differ.
+	// If f is a function with the appropriate signature, DiffFunc(f) is a Differ that calls f.
+	DiffFunc func(current, desired *schema.Schema) ([]schema.Change, error)
+
+	// DiffHook defines the "diff middleware". A function that gets a Differ and returns a Differ.
+	DiffHook func(Differ) Differ
)
+
+// Diff calls f(current, desired).
+func (f DiffFunc) Diff(current, desired *schema.Schema) ([]schema.Change, error) {
+	return f(current, desired)
+}
+
+// WithDiffHook adds a list of DiffHook to the schema migration.
+//
+//	schema.WithDiffHook(func(next schema.Differ) schema.Differ {
+//		return schema.DiffFunc(func(current, desired *atlas.Schema) ([]atlas.Change, error) {
+//			// Code before standard diff.
+//			changes, err := next.Diff(current, desired)
+//			if err != nil {
+//				return nil, err
+//			}
+//			// After diff, you can filter
+//			// changes or return new ones.
+//			return changes, nil
+//		})
+//	})
+func WithDiffHook(hooks ...DiffHook) MigrateOption {
+	return func(a *Atlas) {
+		a.diffHooks = append(a.diffHooks, hooks...)
+	}
+}
+
+// WithSkipChanges allows skipping/filtering a list of changes
+// returned by the Differ before executing migration planning.
+//
+//	SkipChanges(schema.DropTable|schema.DropColumn)
+func WithSkipChanges(skip ChangeKind) MigrateOption {
+	return func(a *Atlas) {
+		a.skip = skip
+	}
+}
+
+// A ChangeKind denotes the kind of schema change.
+type ChangeKind uint
+
+// List of change types.
+const (
+	NoChange  ChangeKind = 0
+	AddSchema ChangeKind = 1 << (iota - 1)
+	ModifySchema
+	DropSchema
+	AddTable
+	ModifyTable
+	DropTable
+	AddColumn
+	ModifyColumn
+	DropColumn
+	AddIndex
+	ModifyIndex
+	DropIndex
+	AddForeignKey
+	ModifyForeignKey
+	DropForeignKey
+	AddCheck
+	ModifyCheck
+	DropCheck
+)
+
+// Is reports whether k matches the given change kind c.
+func (k ChangeKind) Is(c ChangeKind) bool {
+	return k == c || k&c != 0
+}
+
+// filterChanges is a DiffHook that filters changes before planning.
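+// It recurses into ModifySchema and ModifyTable children, so nested
+// changes of a skipped kind are dropped as well.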
+func filterChanges(skip ChangeKind) DiffHook {
+	return func(next Differ) Differ {
+		return DiffFunc(func(current, desired *schema.Schema) ([]schema.Change, error) {
+			var f func([]schema.Change) []schema.Change
+			f = func(changes []schema.Change) (keep []schema.Change) {
+				var k ChangeKind
+				for _, c := range changes {
+					switch c := c.(type) {
+					case *schema.AddSchema:
+						k = AddSchema
+					case *schema.ModifySchema:
+						k = ModifySchema
+						if !skip.Is(k) {
+							c.Changes = f(c.Changes)
+						}
+					case *schema.DropSchema:
+						k = DropSchema
+					case *schema.AddTable:
+						k = AddTable
+					case *schema.ModifyTable:
+						k = ModifyTable
+						if !skip.Is(k) {
+							c.Changes = f(c.Changes)
+						}
+					case *schema.DropTable:
+						k = DropTable
+					case *schema.AddColumn:
+						k = AddColumn
+					case *schema.ModifyColumn:
+						k = ModifyColumn
+					case *schema.DropColumn:
+						k = DropColumn
+					case *schema.AddIndex:
+						k = AddIndex
+					case *schema.ModifyIndex:
+						k = ModifyIndex
+					case *schema.DropIndex:
+						k = DropIndex
+					case *schema.AddForeignKey:
+						k = AddForeignKey
+					case *schema.ModifyForeignKey:
+						k = ModifyForeignKey
+					case *schema.DropForeignKey:
+						k = DropForeignKey
+					case *schema.AddCheck:
+						k = AddCheck
+					case *schema.ModifyCheck:
+						k = ModifyCheck
+					case *schema.DropCheck:
+						k = DropCheck
+					}
+					if !skip.Is(k) {
+						keep = append(keep, c)
+					}
+				}
+				return
+			}
+			changes, err := next.Diff(current, desired)
+			if err != nil {
+				return nil, err
+			}
+			return f(changes), nil
+		})
+	}
+}
+
+func withoutForeignKeys(next Differ) Differ {
+	return DiffFunc(func(current, desired *schema.Schema) ([]schema.Change, error) {
+		changes, err := next.Diff(current, desired)
+		if err != nil {
+			return nil, err
+		}
+		for _, c := range changes {
+			switch c := c.(type) {
+			case *schema.AddTable:
+				c.T.ForeignKeys = nil
+			case *schema.ModifyTable:
+				c.T.ForeignKeys = nil
+				filtered := make([]schema.Change, 0, len(c.Changes))
+				for _, change := range c.Changes {
+					switch change.(type) {
+					case *schema.AddForeignKey, *schema.DropForeignKey, *schema.ModifyForeignKey:
+						continue
+					default:
+						filtered = append(filtered, change)
+					}
+				}
+				c.Changes = filtered
+			}
+		}
+		return changes, nil
+	})
+}
+
+type (
+	// Applier is the interface that wraps the Apply method.
+	Applier interface {
+		// Apply applies the given migrate.Plan on the database.
+		Apply(context.Context, dialect.ExecQuerier, *migrate.Plan) error
+	}
+
+	// The ApplyFunc type is an adapter to allow the use of an ordinary function as an Applier.
+	// If f is a function with the appropriate signature, ApplyFunc(f) is an Applier that calls f.
+	ApplyFunc func(context.Context, dialect.ExecQuerier, *migrate.Plan) error
+
+	// ApplyHook defines the "migration applying middleware". A function that gets an Applier and returns an Applier.
+	ApplyHook func(Applier) Applier
+)
+
+// Apply calls f(ctx, conn, plan).
+func (f ApplyFunc) Apply(ctx context.Context, conn dialect.ExecQuerier, plan *migrate.Plan) error {
+	return f(ctx, conn, plan)
+}
+
+// WithApplyHook adds a list of ApplyHook to the schema migration.
+//
+//	schema.WithApplyHook(func(next schema.Applier) schema.Applier {
+//		return schema.ApplyFunc(func(ctx context.Context, conn dialect.ExecQuerier, plan *migrate.Plan) error {
+//			// Example to hook into the apply process, or implement
+//			// a custom applier.
+// // +// // for _, c := range plan.Changes { +// // fmt.Printf("%s: %s", c.Comment, c.Cmd) +// // } +// // +// return next.Apply(ctx, conn, plan) +// }) +// }) +func WithApplyHook(hooks ...ApplyHook) MigrateOption { + return func(a *Atlas) { + a.applyHook = append(a.applyHook, hooks...) + } +} + +// WithAtlas is an opt-out option for v0.11 indicating the migration +// should be executed using the deprecated legacy engine. +// Note, in future versions, this option is going to be removed +// and the Atlas (https://atlasgo.io) based migration engine should be used. +// +// Deprecated: The legacy engine will be removed. +func WithAtlas(b bool) MigrateOption { + return func(a *Atlas) { + a.legacy = !b + } +} + +// WithDir sets the atlas migration directory to use to store migration files. +func WithDir(dir migrate.Dir) MigrateOption { + return func(a *Atlas) { + a.dir = dir + } +} + +// WithFormatter sets atlas formatter to use to write changes to migration files. +func WithFormatter(fmt migrate.Formatter) MigrateOption { + return func(a *Atlas) { + a.fmt = fmt + } +} + +// WithDialect configures the Ent dialect to use when migrating for an Atlas supported dialect flavor. +// As an example, Ent can work with TiDB in MySQL dialect and Atlas can handle TiDB migrations. +func WithDialect(d string) MigrateOption { + return func(a *Atlas) { + a.dialect = d + } +} + +// WithSumFile instructs atlas to generate a migration directory integrity sum file. +// +// Deprecated: generating the sum file is now opt-out. This method will be removed in future versions. +func WithSumFile() MigrateOption { + return func(a *Atlas) {} +} + +// DisableChecksum instructs atlas to skip migration directory integrity sum file generation. +// +// Deprecated: generating the sum file will no longer be optional in future versions. +func DisableChecksum() MigrateOption { + return func(a *Atlas) { + a.sum = false + } +} + +// WithMigrationMode instructs atlas how to compute the current state of the schema. This can be done by either +// replaying (ModeReplay) the migration directory on the connected database, or by inspecting (ModeInspect) the +// connection. Currently, ModeReplay is opt-in, and ModeInspect is the default. In future versions, ModeReplay will +// become the default behavior. This option has no effect when using online migrations. +func WithMigrationMode(mode Mode) MigrateOption { + return func(a *Atlas) { + a.mode = mode + } +} + +// Mode to compute the current state. +type Mode uint + +const ( + // ModeReplay computes the current state by replaying the migration directory on the connected database. + ModeReplay = iota + // ModeInspect computes the current state by inspecting the connected database. + ModeInspect +) + +// StateReader returns an atlas migrate.StateReader returning the state as described by the Ent table slice. +func (a *Atlas) StateReader(tables ...*Table) migrate.StateReaderFunc { + return func(ctx context.Context) (*schema.Realm, error) { + if a.sqlDialect == nil { + drv, err := a.entDialect(ctx, a.driver) + if err != nil { + return nil, err + } + a.sqlDialect = drv + } + ts, err := a.tables(tables) + if err != nil { + return nil, err + } + return &schema.Realm{Schemas: []*schema.Schema{{Tables: ts}}}, nil + } +} + +// atBuilder must be implemented by the different drivers in +// order to convert a dialect/sql/schema to atlas/sql/schema. 
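+// Each method maps one Ent construct (table, column type, uniqueness,
+// auto-increment, index) onto its Atlas counterpart.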
+type atBuilder interface { + atOpen(dialect.ExecQuerier) (migrate.Driver, error) + atTable(*Table, *schema.Table) + supportsDefault(*Column) bool + atTypeC(*Column, *schema.Column) error + atUniqueC(*Table, *Column, *schema.Table, *schema.Column) + atIncrementC(*schema.Table, *schema.Column) + atIncrementT(*schema.Table, int64) + atIndex(*Index, *schema.Table, *schema.Index) error + atTypeRangeSQL(t ...string) string +} + +// init initializes the configuration object based on the options passed in. +func (a *Atlas) init() error { + skip := DropIndex | DropColumn + if a.skip != NoChange { + skip = a.skip + } + if a.dropIndexes { + skip &= ^DropIndex + } + if a.dropColumns { + skip &= ^DropColumn + } + if skip != NoChange { + a.diffHooks = append(a.diffHooks, filterChanges(skip)) + } + if !a.withForeignKeys { + a.diffHooks = append(a.diffHooks, withoutForeignKeys) + } + if a.dir != nil && a.fmt == nil { + switch a.dir.(type) { + case *sqltool.GooseDir: + a.fmt = sqltool.GooseFormatter + case *sqltool.DBMateDir: + a.fmt = sqltool.DBMateFormatter + case *sqltool.FlywayDir: + a.fmt = sqltool.FlywayFormatter + case *sqltool.LiquibaseDir: + a.fmt = sqltool.LiquibaseFormatter + default: // migrate.LocalDir, sqltool.GolangMigrateDir and custom ones + a.fmt = sqltool.GolangMigrateFormatter + } + } + if a.mode == ModeReplay { + // ModeReplay requires a migration directory. + if a.dir == nil { + return errors.New("sql/schema: WithMigrationMode(ModeReplay) requires versioned migrations: WithDir()") + } + // ModeReplay requires sum file generation. + if !a.sum { + return errors.New("sql/schema: WithMigrationMode(ModeReplay) requires migration directory integrity file") + } + } + return nil +} + +// create is the Atlas engine based online migration. +func (a *Atlas) create(ctx context.Context, tables ...*Table) (err error) { + if a.universalID { + tables = append(tables, NewTypesTable()) + } + if a.driver != nil { + a.sqlDialect, err = a.entDialect(ctx, a.driver) + if err != nil { + return err + } + } else { + c, err := sqlclient.OpenURL(ctx, a.url) + if err != nil { + return err + } + defer c.Close() + a.sqlDialect, err = a.entDialect(ctx, entsql.OpenDB(a.dialect, c.DB)) + if err != nil { + return err + } + } + defer func() { a.sqlDialect = nil }() + if err := a.sqlDialect.init(ctx); err != nil { + return err + } + // Open a transaction for backwards compatibility, + // even if the migration is not transactional. + tx, err := a.sqlDialect.Tx(ctx) + if err != nil { + return err + } + a.atDriver, err = a.sqlDialect.atOpen(tx) + if err != nil { + return err + } + defer func() { a.atDriver = nil }() + if err := func() error { + plan, err := a.planInspect(ctx, tx, "changes", tables) + if err != nil { + return err + } + // Apply plan (changes). 
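+	// Each statement in the plan runs sequentially on the transaction
+	// opened above; a failure aborts the migration and triggers the
+	// rollback in the error branch below.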
+ var applier Applier = ApplyFunc(func(ctx context.Context, tx dialect.ExecQuerier, plan *migrate.Plan) error { + for _, c := range plan.Changes { + if err := tx.Exec(ctx, c.Cmd, c.Args, nil); err != nil { + if c.Comment != "" { + err = fmt.Errorf("%s: %w", c.Comment, err) + } + return err + } + } + return nil + }) + for i := len(a.applyHook) - 1; i >= 0; i-- { + applier = a.applyHook[i](applier) + } + return applier.Apply(ctx, tx, plan) + }(); err != nil { + err = fmt.Errorf("sql/schema: %w", err) + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%w: %v", err, rerr) + } + return err + } + return tx.Commit() +} + +// planInspect creates the current state by inspecting the connected database, computing the current state of the Ent schema +// and proceeds to diff the changes to create a migration plan. +func (a *Atlas) planInspect(ctx context.Context, conn dialect.ExecQuerier, name string, tables []*Table) (*migrate.Plan, error) { + current, err := a.atDriver.InspectSchema(ctx, "", &schema.InspectOptions{ + Tables: func() (t []string) { + for i := range tables { + t = append(t, tables[i].Name) + } + return t + }(), + }) + if err != nil { + return nil, err + } + var types []string + if a.universalID { + types, err = a.loadTypes(ctx, conn) + if err != nil && !errors.Is(err, errTypeTableNotFound) { + return nil, err + } + a.types = types + } + realm, err := a.StateReader(tables...).ReadState(ctx) + if err != nil { + return nil, err + } + desired := realm.Schemas[0] + desired.Name, desired.Attrs = current.Name, current.Attrs + return a.diff(ctx, name, current, desired, a.types[len(types):]) +} + +func (a *Atlas) planReplay(ctx context.Context, name string, tables []*Table) (*migrate.Plan, error) { + // We consider a database clean if there are no tables in the connected schema. + s, err := a.atDriver.InspectSchema(ctx, "", nil) + if err != nil { + return nil, err + } + if len(s.Tables) > 0 { + return nil, &migrate.NotCleanError{Reason: fmt.Sprintf("found table %q", s.Tables[0].Name)} + } + // Replay the migration directory on the database. + ex, err := migrate.NewExecutor(a.atDriver, a.dir, &migrate.NopRevisionReadWriter{}) + if err != nil { + return nil, err + } + if err := ex.ExecuteN(ctx, 0); err != nil && !errors.Is(err, migrate.ErrNoPendingFiles) { + return nil, a.cleanSchema(ctx, "", err) + } + // Inspect the current schema (migration directory). + current, err := a.atDriver.InspectSchema(ctx, "", nil) + if err != nil { + return nil, a.cleanSchema(ctx, "", err) + } + var types []string + if a.universalID { + if types, err = a.loadTypes(ctx, a.sqlDialect); err != nil && !errors.Is(err, errTypeTableNotFound) { + return nil, a.cleanSchema(ctx, "", err) + } + a.types = types + } + if err := a.cleanSchema(ctx, "", nil); err != nil { + return nil, fmt.Errorf("clean schemas after migration replaying: %w", err) + } + desired, err := a.tables(tables) + if err != nil { + return nil, err + } + // In case of replay mode, normalize the desired state (i.e. ent/schema). + if nr, ok := a.atDriver.(schema.Normalizer); ok { + ns, err := nr.NormalizeSchema(ctx, schema.New(current.Name).AddTables(desired...)) + if err != nil { + return nil, err + } + if len(ns.Tables) != len(desired) { + return nil, fmt.Errorf("unexpected number of tables after normalization: %d != %d", len(ns.Tables), len(desired)) + } + // Ensure all tables exist in the normalized format and the order is preserved. 
+ for i, t := range desired { + d, ok := ns.Table(t.Name) + if !ok { + return nil, fmt.Errorf("table %q not found after normalization", t.Name) + } + desired[i] = d + } + } + return a.diff(ctx, name, current, + &schema.Schema{Name: current.Name, Attrs: current.Attrs, Tables: desired}, a.types[len(types):], + // For BC reason, we omit the schema qualifier from the migration scripts, + // but that is currently limiting versioned migration to a single schema. + func(opts *migrate.PlanOptions) { + var noQualifier string + opts.SchemaQualifier = &noQualifier + }, + ) +} + +func (a *Atlas) diff(ctx context.Context, name string, current, desired *schema.Schema, newTypes []string, opts ...migrate.PlanOption) (*migrate.Plan, error) { + changes, err := (&diffDriver{a.atDriver, a.diffHooks}).SchemaDiff(current, desired) + if err != nil { + return nil, err + } + filtered := make([]schema.Change, 0, len(changes)) + for _, c := range changes { + // Skip any table drops explicitly. The reason we may encounter this, even though specific tables are passed + // to Inspect, is if the MySQL system variable 'lower_case_table_names' is set to 1. In such a case, the given + // tables will be returned from inspection because MySQL compares case-insensitive, but they won't match when + // compare them in code. + if _, ok := c.(*schema.DropTable); !ok { + filtered = append(filtered, c) + } + } + if a.indent != "" { + opts = append(opts, func(opts *migrate.PlanOptions) { + opts.Indent = a.indent + }) + } + plan, err := a.atDriver.PlanChanges(ctx, name, filtered, opts...) + if err != nil { + return nil, err + } + if len(newTypes) > 0 { + plan.Changes = append(plan.Changes, &migrate.Change{ + Cmd: a.sqlDialect.atTypeRangeSQL(newTypes...), + Comment: fmt.Sprintf("add pk ranges for %s tables", strings.Join(newTypes, ",")), + }) + } + return plan, nil +} + +var errTypeTableNotFound = errors.New("ent_type table not found") + +// loadTypes loads the currently saved range allocations from the TypeTable. +func (a *Atlas) loadTypes(ctx context.Context, conn dialect.ExecQuerier) ([]string, error) { + // Fetch pre-existing type allocations. + exists, err := a.sqlDialect.tableExist(ctx, conn, TypeTable) + if err != nil { + return nil, err + } + if !exists { + return nil, errTypeTableNotFound + } + rows := &entsql.Rows{} + query, args := entsql.Dialect(a.dialect). 
+ Select("type").From(entsql.Table(TypeTable)).OrderBy(entsql.Asc("id")).Query() + if err := conn.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("query types table: %w", err) + } + defer rows.Close() + var types []string + if err := entsql.ScanSlice(rows, &types); err != nil { + return nil, err + } + return types, nil +} + +type db struct{ dialect.ExecQuerier } + +func (d *db) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + rows := &entsql.Rows{} + if err := d.ExecQuerier.Query(ctx, query, args, rows); err != nil { + return nil, err + } + return rows.ColumnScanner.(*sql.Rows), nil +} + +func (d *db) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + var r sql.Result + if err := d.ExecQuerier.Exec(ctx, query, args, &r); err != nil { + return nil, err + } + return r, nil +} + +// tables converts an Ent table slice to an atlas table slice +func (a *Atlas) tables(tables []*Table) ([]*schema.Table, error) { + ts := make([]*schema.Table, len(tables)) + for i, et := range tables { + at := schema.NewTable(et.Name) + if et.Comment != "" { + at.SetComment(et.Comment) + } + a.sqlDialect.atTable(et, at) + if a.universalID && et.Name != TypeTable && len(et.PrimaryKey) == 1 { + r, err := a.pkRange(et) + if err != nil { + return nil, err + } + a.sqlDialect.atIncrementT(at, r) + } + if err := a.aColumns(et, at); err != nil { + return nil, err + } + if err := a.aIndexes(et, at); err != nil { + return nil, err + } + ts[i] = at + } + for i, t1 := range tables { + t2 := ts[i] + for _, fk1 := range t1.ForeignKeys { + fk2 := schema.NewForeignKey(fk1.Symbol). + SetTable(t2). + SetOnUpdate(schema.ReferenceOption(fk1.OnUpdate)). + SetOnDelete(schema.ReferenceOption(fk1.OnDelete)) + for _, c1 := range fk1.Columns { + c2, ok := t2.Column(c1.Name) + if !ok { + return nil, fmt.Errorf("unexpected fk %q column: %q", fk1.Symbol, c1.Name) + } + fk2.AddColumns(c2) + } + var refT *schema.Table + for _, t2 := range ts { + if t2.Name == fk1.RefTable.Name { + refT = t2 + break + } + } + if refT == nil { + return nil, fmt.Errorf("unexpected fk %q ref-table: %q", fk1.Symbol, fk1.RefTable.Name) + } + fk2.SetRefTable(refT) + for _, c1 := range fk1.RefColumns { + c2, ok := refT.Column(c1.Name) + if !ok { + return nil, fmt.Errorf("unexpected fk %q ref-column: %q", fk1.Symbol, c1.Name) + } + fk2.AddRefColumns(c2) + } + t2.AddForeignKeys(fk2) + } + } + return ts, nil +} + +func (a *Atlas) aColumns(et *Table, at *schema.Table) error { + for _, c1 := range et.Columns { + c2 := schema.NewColumn(c1.Name). 
+ SetNull(c1.Nullable) + if c1.Collation != "" { + c2.SetCollation(c1.Collation) + } + if c1.Comment != "" { + c2.SetComment(c1.Comment) + } + if err := a.sqlDialect.atTypeC(c1, c2); err != nil { + return err + } + if err := a.atDefault(c1, c2); err != nil { + return err + } + if c1.Unique && (len(et.PrimaryKey) != 1 || et.PrimaryKey[0] != c1) { + a.sqlDialect.atUniqueC(et, c1, at, c2) + } + if c1.Increment { + a.sqlDialect.atIncrementC(at, c2) + } + at.AddColumns(c2) + } + return nil +} + +func (a *Atlas) atDefault(c1 *Column, c2 *schema.Column) error { + if c1.Default == nil || !a.sqlDialect.supportsDefault(c1) { + return nil + } + switch x := c1.Default.(type) { + case Expr: + if len(x) > 1 && (x[0] != '(' || x[len(x)-1] != ')') { + x = "(" + x + ")" + } + c2.SetDefault(&schema.RawExpr{X: string(x)}) + case map[string]Expr: + d, ok := x[a.sqlDialect.Dialect()] + if !ok { + return nil + } + if len(d) > 1 && (d[0] != '(' || d[len(d)-1] != ')') { + d = "(" + d + ")" + } + c2.SetDefault(&schema.RawExpr{X: string(d)}) + default: + switch { + case c1.Type == field.TypeJSON: + s, ok := c1.Default.(string) + if !ok { + return fmt.Errorf("invalid default value for JSON column %q: %v", c1.Name, c1.Default) + } + c2.SetDefault(&schema.Literal{V: strings.ReplaceAll(s, "'", "''")}) + default: + // Keep backwards compatibility with the old default value format. + x := fmt.Sprint(c1.Default) + if v, ok := c1.Default.(string); ok && c1.Type != field.TypeUUID && c1.Type != field.TypeTime { + // Escape single quote by replacing each with 2. + x = fmt.Sprintf("'%s'", strings.ReplaceAll(v, "'", "''")) + } + c2.SetDefault(&schema.RawExpr{X: x}) + } + } + return nil +} + +func (a *Atlas) aIndexes(et *Table, at *schema.Table) error { + // Primary-key index. + pk := make([]*schema.Column, 0, len(et.PrimaryKey)) + for _, c1 := range et.PrimaryKey { + c2, ok := at.Column(c1.Name) + if !ok { + return fmt.Errorf("unexpected primary-key column: %q", c1.Name) + } + pk = append(pk, c2) + } + // CreateFunc might clear the primary keys. + if len(pk) > 0 { + at.SetPrimaryKey(schema.NewPrimaryKey(pk...)) + } + // Rest of indexes. + for _, idx1 := range et.Indexes { + idx2 := schema.NewIndex(idx1.Name). + SetUnique(idx1.Unique) + if err := a.sqlDialect.atIndex(idx1, at, idx2); err != nil { + return err + } + desc := descIndexes(idx1) + for _, p := range idx2.Parts { + p.Desc = desc[p.C.Name] + } + at.AddIndexes(idx2) + } + return nil +} + +// setupTables ensures the table is configured properly, like table columns +// are linked to their indexes, and PKs columns are defined. +func (a *Atlas) setupTables(tables []*Table) { + for _, t := range tables { + if t.columns == nil { + t.columns = make(map[string]*Column, len(t.Columns)) + } + for _, c := range t.Columns { + t.columns[c.Name] = c + } + for _, idx := range t.Indexes { + idx.Name = a.symbol(idx.Name) + for _, c := range idx.Columns { + c.indexes.append(idx) + } + } + for _, pk := range t.PrimaryKey { + c := t.columns[pk.Name] + c.Key = PrimaryKey + pk.Key = PrimaryKey + } + for _, fk := range t.ForeignKeys { + fk.Symbol = a.symbol(fk.Symbol) + for i := range fk.Columns { + fk.Columns[i].foreign = fk + } + } + } +} + +// symbol makes sure the symbol length is not longer than the maxlength in the dialect. 
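+// For example, with Postgres (63-byte limit), an over-long name is cut to
+// its first 30 bytes and suffixed with "_" plus the 32-character md5 hex
+// sum of the full name, keeping the result within the limit.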
+func (a *Atlas) symbol(name string) string { + size := 64 + if a.dialect == dialect.Postgres { + size = 63 + } + if len(name) <= size { + return name + } + return fmt.Sprintf("%s_%x", name[:size-33], md5.Sum([]byte(name))) +} + +// entDialect returns the Ent dialect as configured by the dialect option. +func (a *Atlas) entDialect(ctx context.Context, drv dialect.Driver) (sqlDialect, error) { + var d sqlDialect + switch a.dialect { + case dialect.MySQL: + d = &MySQL{Driver: drv} + case dialect.SQLite: + d = &SQLite{Driver: drv, WithForeignKeys: a.withForeignKeys} + case dialect.Postgres: + d = &Postgres{Driver: drv} + default: + return nil, fmt.Errorf("sql/schema: unsupported dialect %q", a.dialect) + } + if err := d.init(ctx); err != nil { + return nil, err + } + return d, nil +} + +func (a *Atlas) pkRange(et *Table) (int64, error) { + idx := indexOf(a.types, et.Name) + // If the table re-created, re-use its range from + // the past. Otherwise, allocate a new id-range. + if idx == -1 { + if len(a.types) > MaxTypes { + return 0, fmt.Errorf("max number of types exceeded: %d", MaxTypes) + } + idx = len(a.types) + a.types = append(a.types, et.Name) + } + return int64(idx << 32), nil +} + +func setAtChecks(et *Table, at *schema.Table) { + if check := et.Annotation.Check; check != "" { + at.AddChecks(&schema.Check{ + Expr: check, + }) + } + if checks := et.Annotation.Checks; len(et.Annotation.Checks) > 0 { + names := make([]string, 0, len(checks)) + for name := range checks { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + at.AddChecks(&schema.Check{ + Name: name, + Expr: checks[name], + }) + } + } +} + +// descIndexes returns a map holding the DESC mapping if exist. +func descIndexes(idx *Index) map[string]bool { + descs := make(map[string]bool) + if idx.Annotation == nil { + return descs + } + // If DESC (without a column) was defined on the + // annotation, map it to the single column index. + if idx.Annotation.Desc && len(idx.Columns) == 1 { + descs[idx.Columns[0].Name] = idx.Annotation.Desc + } + for column, desc := range idx.Annotation.DescColumns { + descs[column] = desc + } + return descs +} + +// driver decorates the atlas migrate.Driver and adds "diff hooking" and functionality. +type diffDriver struct { + migrate.Driver + hooks []DiffHook // hooks to apply +} + +// RealmDiff creates the diff between two realms. Since Ent does not care about Realms, +// not even schema changes, calling this method raises an error. +func (r *diffDriver) RealmDiff(_, _ *schema.Realm) ([]schema.Change, error) { + return nil, errors.New("sqlDialect does not support working with realms") +} + +// SchemaDiff creates the diff between two schemas, but includes "diff hooks". +func (r *diffDriver) SchemaDiff(from, to *schema.Schema) ([]schema.Change, error) { + var d Differ = DiffFunc(func(current, desired *schema.Schema) ([]schema.Change, error) { + return r.Driver.SchemaDiff(current, desired) + }) + for i := len(r.hooks) - 1; i >= 0; i-- { + d = r.hooks[i](d) + } + return d.Diff(from, to) +} + +// legacyMigrate returns a configured legacy migration engine (before Atlas) to keep backwards compatibility. +// +// Deprecated: Will be removed alongside legacy migration support. 
+func (a *Atlas) legacyMigrate() (*Migrate, error) { + m := &Migrate{ + universalID: a.universalID, + dropColumns: a.dropColumns, + dropIndexes: a.dropIndexes, + withFixture: a.withFixture, + withForeignKeys: a.withForeignKeys, + hooks: a.hooks, + atlas: a, + } + switch a.dialect { + case dialect.MySQL: + m.sqlDialect = &MySQL{Driver: a.driver} + case dialect.SQLite: + m.sqlDialect = &SQLite{Driver: a.driver, WithForeignKeys: a.withForeignKeys} + case dialect.Postgres: + m.sqlDialect = &Postgres{Driver: a.driver} + default: + return nil, fmt.Errorf("sql/schema: unsupported dialect %q", a.dialect) + } + return m, nil +} + +// removeAttr is a temporary patch due to compiler errors we get by using the generic +// schema.RemoveAttr function (:1: internal compiler error: panic: ...). +// Can be removed in Go 1.20. See: https://github.com/golang/go/issues/54302. +func removeAttr(attrs []schema.Attr, t reflect.Type) []schema.Attr { + f := make([]schema.Attr, 0, len(attrs)) + for _, a := range attrs { + if reflect.TypeOf(a) != t { + f = append(f, a) + } + } + return f +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/inspect.go b/vendor/entgo.io/ent/dialect/sql/schema/inspect.go new file mode 100644 index 00000000..95d52915 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/inspect.go @@ -0,0 +1,95 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" +) + +// InspectOption allows for managing schema configuration using functional options. +type InspectOption func(inspect *Inspector) + +// WithSchema provides a schema (named-database) for reading the tables from. +func WithSchema(schema string) InspectOption { + return func(m *Inspector) { + m.schema = schema + } +} + +// An Inspector provides methods for inspecting database tables. +type Inspector struct { + sqlDialect + schema string +} + +// NewInspect returns an inspector for the given SQL driver. +func NewInspect(d dialect.Driver, opts ...InspectOption) (*Inspector, error) { + i := &Inspector{} + for _, opt := range opts { + opt(i) + } + switch d.Dialect() { + case dialect.MySQL: + i.sqlDialect = &MySQL{Driver: d, schema: i.schema} + case dialect.SQLite: + i.sqlDialect = &SQLite{Driver: d} + case dialect.Postgres: + i.sqlDialect = &Postgres{Driver: d, schema: i.schema} + default: + return nil, fmt.Errorf("sql/schema: unsupported dialect %q", d.Dialect()) + } + return i, nil +} + +// Tables returns the tables in the schema. 
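+//
+// A minimal usage sketch (drv is an assumed dialect.Driver):
+//
+//	insp, err := NewInspect(drv, WithSchema("public"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tables, err := insp.Tables(ctx)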
+func (i *Inspector) Tables(ctx context.Context) ([]*Table, error) {
+	names, err := i.tables(ctx)
+	if err != nil {
+		return nil, err
+	}
+	tx := dialect.NopTx(i.sqlDialect)
+	tables := make([]*Table, 0, len(names))
+	for _, name := range names {
+		t, err := i.table(ctx, tx, name)
+		if err != nil {
+			return nil, err
+		}
+		tables = append(tables, t)
+	}
+
+	fki, ok := i.sqlDialect.(interface {
+		foreignKeys(context.Context, dialect.Tx, []*Table) error
+	})
+	if ok {
+		if err := fki.foreignKeys(ctx, tx, tables); err != nil {
+			return nil, err
+		}
+	}
+	return tables, nil
+}
+
+func (i *Inspector) tables(ctx context.Context) ([]string, error) {
+	t, ok := i.sqlDialect.(interface{ tables() sql.Querier })
+	if !ok {
+		return nil, fmt.Errorf("sql/schema: %q driver does not support inspection", i.Dialect())
+	}
+	query, args := t.tables().Query()
+	var (
+		names []string
+		rows  = &sql.Rows{}
+	)
+	if err := i.Query(ctx, query, args, rows); err != nil {
+		return nil, fmt.Errorf("%q driver: reading table names: %w", i.Dialect(), err)
+	}
+	defer rows.Close()
+	if err := sql.ScanSlice(rows, &names); err != nil {
+		return nil, err
+	}
+	return names, nil
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/schema/migrate.go b/vendor/entgo.io/ent/dialect/sql/schema/migrate.go
new file mode 100644
index 00000000..014eb98b
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/schema/migrate.go
@@ -0,0 +1,660 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package schema
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/schema/field"
+)
+
+const (
+	// TypeTable defines the table name holding the type information.
+	TypeTable = "ent_types"
+
+	// MaxTypes defines the max number of types that can be created when
+	// defining universal ids. The left 16-bits are reserved.
+	MaxTypes = math.MaxUint16
+)
+
+// NewTypesTable returns a new table for holding the global-id information.
+func NewTypesTable() *Table {
+	return NewTable(TypeTable).
+		AddPrimary(&Column{Name: "id", Type: field.TypeUint, Increment: true}).
+		AddColumn(&Column{Name: "type", Type: field.TypeString, Unique: true})
+}
+
+// MigrateOption allows configuring Atlas using functional arguments.
+type MigrateOption func(*Atlas)
+
+// WithGlobalUniqueID sets the universal-ids option for the migration.
+// Defaults to false.
+func WithGlobalUniqueID(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.universalID = b
+	}
+}
+
+// WithIndent sets Atlas to generate SQL statements with indentation.
+// An empty string indicates no indentation.
+func WithIndent(indent string) MigrateOption {
+	return func(a *Atlas) {
+		a.indent = indent
+	}
+}
+
+// WithErrNoPlan sets Atlas to return a migrate.ErrNoPlan in case
+// the migration plan is empty. Defaults to false.
+func WithErrNoPlan(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.errNoPlan = b
+	}
+}
+
+// WithDropColumn sets the column-dropping option for the migration.
+// Defaults to false.
+func WithDropColumn(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.dropColumns = b
+	}
+}
+
+// WithDropIndex sets the index-dropping option for the migration.
+// Defaults to false.
+func WithDropIndex(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.dropIndexes = b
+	}
+}
+
+// WithFixture sets the foreign-key renaming option to the migration when upgrading
+// ent from v0.1.0 (issue-#285). Defaults to false.
+//
+// Deprecated: This option is no longer needed with the Atlas based
+// migration engine, which now is the default.
+func WithFixture(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.withFixture = b
+	}
+}
+
+// WithForeignKeys enables creating foreign-key in ddl. Defaults to true.
+func WithForeignKeys(b bool) MigrateOption {
+	return func(a *Atlas) {
+		a.withForeignKeys = b
+	}
+}
+
+// WithHooks adds a list of hooks to the schema migration.
+func WithHooks(hooks ...Hook) MigrateOption {
+	return func(a *Atlas) {
+		a.hooks = append(a.hooks, hooks...)
+	}
+}
+
+type (
+	// Creator is the interface that wraps the Create method.
+	Creator interface {
+		// Create creates the given tables in the database. See Migrate.Create for more details.
+		Create(context.Context, ...*Table) error
+	}
+
+	// The CreateFunc type is an adapter to allow the use of an ordinary function as a Creator.
+	// If f is a function with the appropriate signature, CreateFunc(f) is a Creator that calls f.
+	CreateFunc func(context.Context, ...*Table) error
+
+	// Hook defines the "create middleware". A function that gets a Creator and returns a Creator.
+	// For example:
+	//
+	//	hook := func(next schema.Creator) schema.Creator {
+	//		return schema.CreateFunc(func(ctx context.Context, tables ...*schema.Table) error {
+	//			fmt.Println("Tables:", tables)
+	//			return next.Create(ctx, tables...)
+	//		})
+	//	}
+	//
+	Hook func(Creator) Creator
+)
+
+// Create calls f(ctx, tables...).
+func (f CreateFunc) Create(ctx context.Context, tables ...*Table) error {
+	return f(ctx, tables...)
+}
+
+// Migrate runs the migration logic for the SQL dialects.
+//
+// Deprecated: Use the new Atlas struct instead.
+type Migrate struct {
+	sqlDialect
+	atlas *Atlas // Atlas this Migrate is based on
+
+	universalID     bool     // global unique ids
+	dropColumns     bool     // drop deleted columns
+	dropIndexes     bool     // drop deleted indexes
+	withFixture     bool     // with fks rename fixture
+	withForeignKeys bool     // with foreign keys
+	typeRanges      []string // types order by their range
+	hooks           []Hook   // hooks to apply before creation
+}
+
+// Create creates all schema resources in the database. It works in an "append-only"
+// mode, which means it only creates tables, appends columns to tables, or modifies column types.
+//
+// A column can be modified by turning it from NOT NULL into NULL, or by a type conversion that
+// does not alter data. For example, changing varchar(255) to varchar(120) is invalid, but
+// changing varchar(120) to varchar(255) is valid. For more info, see the convert function below.
+//
+// Note that the SQLite dialect does not support (at this moment) the "append-only" mode
+// described above, since it is used only for testing.
+func (m *Migrate) Create(ctx context.Context, tables ...*Table) error {
+	m.setupTables(tables)
+	var creator Creator = CreateFunc(m.create)
+	for i := len(m.hooks) - 1; i >= 0; i-- {
+		creator = m.hooks[i](creator)
+	}
+	return creator.Create(ctx, tables...)
+} + +func (m *Migrate) create(ctx context.Context, tables ...*Table) error { + if err := m.init(ctx); err != nil { + return err + } + tx, err := m.Tx(ctx) + if err != nil { + return err + } + if m.universalID { + if err := m.types(ctx, tx); err != nil { + return rollback(tx, err) + } + } + if err := m.txCreate(ctx, tx, tables...); err != nil { + return rollback(tx, err) + } + return tx.Commit() +} + +func (m *Migrate) txCreate(ctx context.Context, tx dialect.Tx, tables ...*Table) error { + for _, t := range tables { + switch exist, err := m.tableExist(ctx, tx, t.Name); { + case err != nil: + return err + case exist: + curr, err := m.table(ctx, tx, t.Name) + if err != nil { + return err + } + if err := m.verify(ctx, tx, curr); err != nil { + return err + } + if err := m.fixture(ctx, tx, curr, t); err != nil { + return err + } + change, err := m.changeSet(curr, t) + if err != nil { + return fmt.Errorf("creating changeset for %q: %w", t.Name, err) + } + if err := m.apply(ctx, tx, t.Name, change); err != nil { + return err + } + default: // !exist + query, args := m.tBuilder(t).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("create table %q: %w", t.Name, err) + } + // If global unique identifier is enabled, and it's not + // a relation table, allocate a range for the table pk. + if m.universalID && len(t.PrimaryKey) == 1 { + if err := m.allocPKRange(ctx, tx, t); err != nil { + return err + } + } + // indexes. + for _, idx := range t.Indexes { + query, args := m.addIndex(idx, t.Name).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("create index %q: %w", idx.Name, err) + } + } + } + } + if !m.withForeignKeys { + return nil + } + // Create foreign keys after tables were created/altered, + // because circular foreign-key constraints are possible. + for _, t := range tables { + if len(t.ForeignKeys) == 0 { + continue + } + fks := make([]*ForeignKey, 0, len(t.ForeignKeys)) + for _, fk := range t.ForeignKeys { + exist, err := m.fkExist(ctx, tx, fk.Symbol) + if err != nil { + return err + } + if !exist { + fks = append(fks, fk) + } + } + if len(fks) == 0 { + continue + } + b := sql.Dialect(m.Dialect()).AlterTable(t.Name) + for _, fk := range fks { + b.AddForeignKey(fk.DSL()) + } + query, args := b.Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("create foreign keys for %q: %w", t.Name, err) + } + } + return nil +} + +// apply changes on the given table. +func (m *Migrate) apply(ctx context.Context, tx dialect.Tx, table string, change *changes) error { + // Constraints should be dropped before dropping columns, because if a column + // is a part of multi-column constraints (like, unique index), ALTER TABLE + // might fail if the intermediate state violates the constraints. + if m.dropIndexes { + if pr, ok := m.sqlDialect.(preparer); ok { + if err := pr.prepare(ctx, tx, change, table); err != nil { + return err + } + } + for _, idx := range change.index.drop { + if err := m.dropIndex(ctx, tx, idx, table); err != nil { + return fmt.Errorf("drop index of table %q: %w", table, err) + } + } + } + var drop []*Column + if m.dropColumns { + drop = change.column.drop + } + queries := m.alterColumns(table, change.column.add, change.column.modify, drop) + // If there's actual action to execute on ALTER TABLE. 
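+	// (alterColumns returns an empty set when there is nothing to change,
+	// so the loop below becomes a no-op in that case.)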
+ for i := range queries { + query, args := queries[i].Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("alter table %q: %w", table, err) + } + } + for _, idx := range change.index.add { + query, args := m.addIndex(idx, table).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("create index %q: %w", table, err) + } + } + return nil +} + +// changes to apply on existing table. +type changes struct { + // column changes. + column struct { + add []*Column + drop []*Column + modify []*Column + } + // index changes. + index struct { + add Indexes + drop Indexes + } +} + +// dropColumn returns the dropped column by name (if any). +func (c *changes) dropColumn(name string) (*Column, bool) { + for _, col := range c.column.drop { + if col.Name == name { + return col, true + } + } + return nil, false +} + +// changeSet returns a changes object to be applied on existing table. +// It fails if one of the changes is invalid. +func (m *Migrate) changeSet(curr, new *Table) (*changes, error) { + change := &changes{} + // pks. + if len(curr.PrimaryKey) != len(new.PrimaryKey) { + return nil, fmt.Errorf("cannot change primary key for table: %q", curr.Name) + } + for i := range curr.PrimaryKey { + if curr.PrimaryKey[i].Name != new.PrimaryKey[i].Name { + return nil, fmt.Errorf("cannot change primary key for table: %q", curr.Name) + } + } + // Add or modify columns. + for _, c1 := range new.Columns { + // Ignore primary keys. + if c1.PrimaryKey() { + continue + } + switch c2, ok := curr.column(c1.Name); { + case !ok: + change.column.add = append(change.column.add, c1) + case !c2.Type.Valid(): + return nil, fmt.Errorf("invalid type %q for column %q", c2.typ, c2.Name) + // Modify a non-unique column to unique. + case c1.Unique && !c2.Unique: + // Make sure the table does not have unique index for this column + // before adding it to the changeset, because there are 2 ways to + // configure uniqueness on sqlDialect.Field (using the Unique modifier or + // adding rule on the Indexes option). + if idx, ok := curr.index(c1.Name); !ok || !idx.Unique { + change.index.add.append(&Index{ + Name: c1.Name, + Unique: true, + Columns: []*Column{c1}, + columns: []string{c1.Name}, + }) + } + // Modify a unique column to non-unique. + case !c1.Unique && c2.Unique: + // If the uniqueness was defined on the Indexes option, + // or was moved from the Unique modifier to the Indexes. + if idx, ok := new.index(c1.Name); ok && idx.Unique { + continue + } + idx, ok := curr.index(c2.Name) + if !ok { + return nil, fmt.Errorf("missing index to drop for unique column %q", c2.Name) + } + change.index.drop.append(idx) + // Extending column types. + case m.needsConversion(c2, c1): + if !c2.ConvertibleTo(c1) { + return nil, fmt.Errorf("changing column type for %q is invalid (%s != %s)", c1.Name, m.cType(c1), m.cType(c2)) + } + fallthrough + // Change nullability of a column. + case c1.Nullable != c2.Nullable: + change.column.modify = append(change.column.modify, c1) + // Change default value. + case c1.Default != nil && c2.Default == nil: + change.column.modify = append(change.column.modify, c1) + } + } + // Drop columns. + for _, c1 := range curr.Columns { + // If a column was dropped, multi-columns indexes that are associated with this column will + // no longer behave the same. Therefore, these indexes should be dropped too. 
There's no need + // to do it explicitly (here), because entc will remove them from the schema specification, + // and they will be dropped in the block below. + if _, ok := new.column(c1.Name); !ok { + change.column.drop = append(change.column.drop, c1) + } + } + // Add or modify indexes. + for _, idx1 := range new.Indexes { + switch idx2, ok := curr.index(idx1.Name); { + case !ok: + change.index.add.append(idx1) + // Changing index cardinality require drop and create. + case idx1.Unique != idx2.Unique: + change.index.drop.append(idx2) + change.index.add.append(idx1) + default: + im, ok := m.sqlDialect.(interface{ indexModified(old, new *Index) bool }) + // If the dialect supports comparing indexes. + if ok && im.indexModified(idx2, idx1) { + change.index.drop.append(idx2) + change.index.add.append(idx1) + } + } + } + // Drop indexes. + for _, idx := range curr.Indexes { + if _, isFK := new.fk(idx.Name); !isFK && !new.hasIndex(idx.Name, idx.realname) { + change.index.drop.append(idx) + } + } + return change, nil +} + +// fixture is a special migration code for renaming foreign-key columns (issue-#285). +func (m *Migrate) fixture(ctx context.Context, tx dialect.Tx, curr, new *Table) error { + d, ok := m.sqlDialect.(fkRenamer) + if !m.withFixture || !m.withForeignKeys || !ok { + return nil + } + rename := make(map[string]*Index) + for _, fk := range new.ForeignKeys { + ok, err := m.fkExist(ctx, tx, fk.Symbol) + if err != nil { + return fmt.Errorf("checking foreign-key existence %q: %w", fk.Symbol, err) + } + if !ok { + continue + } + column, err := m.fkColumn(ctx, tx, fk) + if err != nil { + return err + } + newcol := fk.Columns[0] + if column == newcol.Name { + continue + } + query, args := d.renameColumn(curr, &Column{Name: column}, newcol).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("rename column %q: %w", column, err) + } + prev, ok := curr.column(column) + if !ok { + continue + } + // Find all indexes that ~maybe need to be renamed. + for _, idx := range prev.indexes { + switch _, ok := new.index(idx.Name); { + // Ignore indexes that exist in the schema, PKs. + case ok || idx.primary: + // Index that was created implicitly for a unique + // column needs to be renamed to the column name. + case d.isImplicitIndex(idx, prev): + idx2 := &Index{Name: newcol.Name, Unique: true, Columns: []*Column{newcol}} + query, args := d.renameIndex(curr, idx, idx2).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("rename index %q: %w", prev.Name, err) + } + idx.Name = idx2.Name + default: + rename[idx.Name] = idx + } + } + // Update the name of the loaded column, so `changeSet` won't create it. + prev.Name = newcol.Name + } + // Go over the indexes that need to be renamed + // and find their ~identical in the new schema. + for _, idx := range rename { + Find: + // Find its ~identical in the new schema, and rename it + // if it doesn't exist. + for _, idx2 := range new.Indexes { + if _, ok := curr.index(idx2.Name); ok { + continue + } + if idx.sameAs(idx2) { + query, args := d.renameIndex(curr, idx, idx2).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("rename index %q: %w", idx.Name, err) + } + idx.Name = idx2.Name + break Find + } + } + } + return nil +} + +// verify that the auto-increment counter is correct for table with universal-id support. 
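+// With universal ids, each type gets a 2^32-wide id range based on its position
+// in the ent_types table; an illustrative calculation (using pkRange below):
+//
+//	id := indexOf(m.typeRanges, "users") // e.g. 2
+//	lo := int64(id) << 32                // 8589934592, first id of "users"
+//	hi := int64(id+1)<<32 - 1            // 12884901887, last id of "users"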
+func (m *Migrate) verify(ctx context.Context, tx dialect.Tx, t *Table) error { + vr, ok := m.sqlDialect.(verifyRanger) + if !ok || !m.universalID { + return nil + } + id := indexOf(m.typeRanges, t.Name) + if id == -1 { + return nil + } + return vr.verifyRange(ctx, tx, t, int64(id<<32)) +} + +// types loads the type list from the type store. It will create the types table, if it does not exist yet. +func (m *Migrate) types(ctx context.Context, tx dialect.ExecQuerier) error { + exists, err := m.tableExist(ctx, tx, TypeTable) + if err != nil { + return err + } + if !exists { + t := NewTypesTable() + query, args := m.tBuilder(t).Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("create types table: %w", err) + } + return nil + } + rows := &sql.Rows{} + query, args := sql.Dialect(m.Dialect()). + Select("type").From(sql.Table(TypeTable)).OrderBy(sql.Asc("id")).Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return fmt.Errorf("query types table: %w", err) + } + defer rows.Close() + return sql.ScanSlice(rows, &m.typeRanges) +} + +func (m *Migrate) allocPKRange(ctx context.Context, conn dialect.ExecQuerier, t *Table) error { + r, err := m.pkRange(ctx, conn, t) + if err != nil { + return err + } + return m.setRange(ctx, conn, t, r) +} + +func (m *Migrate) pkRange(ctx context.Context, conn dialect.ExecQuerier, t *Table) (int64, error) { + id := indexOf(m.typeRanges, t.Name) + // If the table re-created, re-use its range from + // the past. Otherwise, allocate a new id-range. + if id == -1 { + if len(m.typeRanges) > MaxTypes { + return 0, fmt.Errorf("max number of types exceeded: %d", MaxTypes) + } + query, args := sql.Dialect(m.Dialect()).Insert(TypeTable).Columns("type").Values(t.Name).Query() + if err := conn.Exec(ctx, query, args, nil); err != nil { + return 0, fmt.Errorf("insert into ent_types: %w", err) + } + id = len(m.typeRanges) + m.typeRanges = append(m.typeRanges, t.Name) + } + return int64(id << 32), nil +} + +// fkColumn returns the column name of a foreign-key. +func (m *Migrate) fkColumn(ctx context.Context, tx dialect.Tx, fk *ForeignKey) (string, error) { + t1 := sql.Table("INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS t1").Unquote().As("t1") + t2 := sql.Table("INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS t2").Unquote().As("t2") + query, args := sql.Dialect(m.Dialect()). + Select("column_name"). + From(t1). + Join(t2). + On(t1.C("constraint_name"), t2.C("constraint_name")). + Where(sql.And( + sql.EQ(t2.C("constraint_type"), sql.Raw("'FOREIGN KEY'")), + m.sqlDialect.(fkRenamer).matchSchema(t2.C("table_schema")), + m.sqlDialect.(fkRenamer).matchSchema(t1.C("table_schema")), + sql.EQ(t2.C("constraint_name"), fk.Symbol), + )). + Query() + rows := &sql.Rows{} + if err := tx.Query(ctx, query, args, rows); err != nil { + return "", fmt.Errorf("reading foreign-key %q column: %w", fk.Symbol, err) + } + defer rows.Close() + column, err := sql.ScanString(rows) + if err != nil { + return "", fmt.Errorf("scanning foreign-key %q column: %w", fk.Symbol, err) + } + return column, nil +} + +// setup ensures the table is configured properly, like table columns +// are linked to their indexes, and PKs columns are defined. +func (m *Migrate) setupTables(tables []*Table) { m.atlas.setupTables(tables) } + +// rollback calls to tx.Rollback and wraps the given error with the rollback error if occurred. 
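+// When the rollback itself fails, the result reads
+// "sql/schema: <original error>: <rollback error>", keeping both causes visible.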
+func rollback(tx dialect.Tx, err error) error { + err = fmt.Errorf("sql/schema: %w", err) + if rerr := tx.Rollback(); rerr != nil { + err = fmt.Errorf("%w: %v", err, rerr) + } + return err +} + +// exist checks if the given COUNT query returns a value >= 1. +func exist(ctx context.Context, conn dialect.ExecQuerier, query string, args ...any) (bool, error) { + rows := &sql.Rows{} + if err := conn.Query(ctx, query, args, rows); err != nil { + return false, fmt.Errorf("reading schema information %w", err) + } + defer rows.Close() + n, err := sql.ScanInt(rows) + if err != nil { + return false, err + } + return n > 0, nil +} + +func indexOf(a []string, s string) int { + for i := range a { + if a[i] == s { + return i + } + } + return -1 +} + +type sqlDialect interface { + atBuilder + dialect.Driver + init(context.Context) error + table(context.Context, dialect.Tx, string) (*Table, error) + tableExist(context.Context, dialect.ExecQuerier, string) (bool, error) + fkExist(context.Context, dialect.Tx, string) (bool, error) + setRange(context.Context, dialect.ExecQuerier, *Table, int64) error + dropIndex(context.Context, dialect.Tx, *Index, string) error + // table, column and index builder per dialect. + cType(*Column) string + tBuilder(*Table) *sql.TableBuilder + addIndex(*Index, string) *sql.IndexBuilder + alterColumns(table string, add, modify, drop []*Column) sql.Queries + needsConversion(*Column, *Column) bool +} + +type preparer interface { + prepare(context.Context, dialect.Tx, *changes, string) error +} + +// fkRenamer is used by the fixture migration (to solve #285), +// and it's implemented by the different dialects for renaming FKs. +type fkRenamer interface { + matchSchema(...string) *sql.Predicate + isImplicitIndex(*Index, *Column) bool + renameIndex(*Table, *Index, *Index) sql.Querier + renameColumn(*Table, *Column, *Column) sql.Querier +} + +// verifyRanger wraps the method for verifying global-id range correctness. +type verifyRanger interface { + verifyRange(context.Context, dialect.ExecQuerier, *Table, int64) error +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/mysql.go b/vendor/entgo.io/ent/dialect/sql/schema/mysql.go new file mode 100644 index 00000000..1017f45e --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/mysql.go @@ -0,0 +1,997 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +import ( + "context" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" + + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/mysql" + "ariga.io/atlas/sql/schema" +) + +// MySQL is a MySQL migration driver. +type MySQL struct { + dialect.Driver + schema string + version string +} + +// init loads the MySQL version from the database for later use in the migration process. 
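+// The stored version gates feature support throughout this driver via
+// compareVersions; e.g. compareVersions("5.6.35", "5.7.8") == -1, so such a
+// server falls back to "longblob" for JSON columns (see cType below).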
+func (d *MySQL) init(ctx context.Context) error { + rows := &sql.Rows{} + if err := d.Query(ctx, "SHOW VARIABLES LIKE 'version'", []any{}, rows); err != nil { + return fmt.Errorf("mysql: querying mysql version %w", err) + } + defer rows.Close() + if !rows.Next() { + if err := rows.Err(); err != nil { + return err + } + return fmt.Errorf("mysql: version variable was not found") + } + version := make([]string, 2) + if err := rows.Scan(&version[0], &version[1]); err != nil { + return fmt.Errorf("mysql: scanning mysql version: %w", err) + } + d.version = version[1] + return nil +} + +func (d *MySQL) tableExist(ctx context.Context, conn dialect.ExecQuerier, name string) (bool, error) { + query, args := sql.Select(sql.Count("*")).From(sql.Table("TABLES").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + d.matchSchema(), + sql.EQ("TABLE_NAME", name), + )).Query() + return exist(ctx, conn, query, args...) +} + +func (d *MySQL) fkExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) { + query, args := sql.Select(sql.Count("*")).From(sql.Table("TABLE_CONSTRAINTS").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + d.matchSchema(), + sql.EQ("CONSTRAINT_TYPE", "FOREIGN KEY"), + sql.EQ("CONSTRAINT_NAME", name), + )).Query() + return exist(ctx, tx, query, args...) +} + +// table loads the current table description from the database. +func (d *MySQL) table(ctx context.Context, tx dialect.Tx, name string) (*Table, error) { + rows := &sql.Rows{} + query, args := sql.Select( + "column_name", "column_type", "is_nullable", "column_key", "column_default", "extra", "character_set_name", "collation_name", + "numeric_precision", "numeric_scale", + ). + From(sql.Table("COLUMNS").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + d.matchSchema(), + sql.EQ("TABLE_NAME", name)), + ).Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("mysql: reading table description %w", err) + } + // Call Close in cases of failures (Close is idempotent). + defer rows.Close() + t := NewTable(name) + for rows.Next() { + c := &Column{} + if err := d.scanColumn(c, rows); err != nil { + return nil, fmt.Errorf("mysql: %w", err) + } + t.AddColumn(c) + } + if err := rows.Err(); err != nil { + return nil, err + } + if err := rows.Close(); err != nil { + return nil, fmt.Errorf("mysql: closing rows %w", err) + } + indexes, err := d.indexes(ctx, tx, t) + if err != nil { + return nil, err + } + // Add and link indexes to table columns. + for _, idx := range indexes { + t.addIndex(idx) + } + if _, ok := d.mariadb(); ok { + if err := d.normalizeJSON(ctx, tx, t); err != nil { + return nil, err + } + } + return t, nil +} + +// table loads the table indexes from the database. +func (d *MySQL) indexes(ctx context.Context, tx dialect.Tx, t *Table) ([]*Index, error) { + rows := &sql.Rows{} + query, args := sql.Select("index_name", "column_name", "sub_part", "non_unique", "seq_in_index"). + From(sql.Table("STATISTICS").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + d.matchSchema(), + sql.EQ("TABLE_NAME", t.Name), + )). + OrderBy("index_name", "seq_in_index"). 
+ Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("mysql: reading index description %w", err) + } + defer rows.Close() + idx, err := d.scanIndexes(rows, t) + if err != nil { + return nil, fmt.Errorf("mysql: %w", err) + } + return idx, nil +} + +func (d *MySQL) setRange(ctx context.Context, conn dialect.ExecQuerier, t *Table, value int64) error { + return conn.Exec(ctx, fmt.Sprintf("ALTER TABLE `%s` AUTO_INCREMENT = %d", t.Name, value), []any{}, nil) +} + +func (d *MySQL) verifyRange(ctx context.Context, tx dialect.ExecQuerier, t *Table, expected int64) error { + if expected == 0 { + return nil + } + rows := &sql.Rows{} + query, args := sql.Select("AUTO_INCREMENT"). + From(sql.Table("TABLES").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + d.matchSchema(), + sql.EQ("TABLE_NAME", t.Name), + )). + Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return fmt.Errorf("mysql: query auto_increment %w", err) + } + // Call Close in cases of failures (Close is idempotent). + defer rows.Close() + actual := &sql.NullInt64{} + if err := sql.ScanOne(rows, actual); err != nil { + return fmt.Errorf("mysql: scan auto_increment %w", err) + } + if err := rows.Close(); err != nil { + return err + } + // Table is empty and auto-increment is not configured. This can happen + // because MySQL (< 8.0) stores the auto-increment counter in main memory + // (not persistent), and the value is reset on restart (if table is empty). + if actual.Int64 <= 1 { + return d.setRange(ctx, tx, t, expected) + } + return nil +} + +// tBuilder returns the MySQL DSL query for table creation. +func (d *MySQL) tBuilder(t *Table) *sql.TableBuilder { + b := sql.CreateTable(t.Name).IfNotExists() + for _, c := range t.Columns { + b.Column(d.addColumn(c)) + } + for _, pk := range t.PrimaryKey { + b.PrimaryKey(pk.Name) + } + // Charset and collation config on MySQL table. + // These options can be overridden by the entsql annotation. + b.Charset("utf8mb4").Collate("utf8mb4_bin") + if t.Annotation != nil { + if charset := t.Annotation.Charset; charset != "" { + b.Charset(charset) + } + if collate := t.Annotation.Collation; collate != "" { + b.Collate(collate) + } + if opts := t.Annotation.Options; opts != "" { + b.Options(opts) + } + addChecks(b, t.Annotation) + } + return b +} + +// cType returns the MySQL string type for the given column. +func (d *MySQL) cType(c *Column) (t string) { + if c.SchemaType != nil && c.SchemaType[dialect.MySQL] != "" { + // MySQL returns the column type lower cased. 
+ return strings.ToLower(c.SchemaType[dialect.MySQL]) + } + switch c.Type { + case field.TypeBool: + t = "boolean" + case field.TypeInt8: + t = "tinyint" + case field.TypeUint8: + t = "tinyint unsigned" + case field.TypeInt16: + t = "smallint" + case field.TypeUint16: + t = "smallint unsigned" + case field.TypeInt32: + t = "int" + case field.TypeUint32: + t = "int unsigned" + case field.TypeInt, field.TypeInt64: + t = "bigint" + case field.TypeUint, field.TypeUint64: + t = "bigint unsigned" + case field.TypeBytes: + size := int64(math.MaxUint16) + if c.Size > 0 { + size = c.Size + } + switch { + case size <= math.MaxUint8: + t = "tinyblob" + case size <= math.MaxUint16: + t = "blob" + case size < 1<<24: + t = "mediumblob" + case size <= math.MaxUint32: + t = "longblob" + } + case field.TypeJSON: + t = "json" + if compareVersions(d.version, "5.7.8") == -1 { + t = "longblob" + } + case field.TypeString: + size := c.Size + if size == 0 { + size = d.defaultSize(c) + } + switch { + case c.typ == "tinytext", c.typ == "text": + t = c.typ + case size <= math.MaxUint16: + t = fmt.Sprintf("varchar(%d)", size) + case size == 1<<24-1: + t = "mediumtext" + default: + t = "longtext" + } + case field.TypeFloat32, field.TypeFloat64: + t = c.scanTypeOr("double") + case field.TypeTime: + t = c.scanTypeOr("timestamp") + // In MariaDB or in MySQL < v8.0.2, the TIMESTAMP column has both `DEFAULT CURRENT_TIMESTAMP` + // and `ON UPDATE CURRENT_TIMESTAMP` if neither is specified explicitly. this behavior is + // suppressed if the column is defined with a `DEFAULT` clause or with the `NULL` attribute. + if _, maria := d.mariadb(); maria || compareVersions(d.version, "8.0.2") == -1 && c.Default == nil { + c.Nullable = c.Attr == "" + } + case field.TypeEnum: + values := make([]string, len(c.Enums)) + for i, e := range c.Enums { + values[i] = fmt.Sprintf("'%s'", e) + } + t = fmt.Sprintf("enum(%s)", strings.Join(values, ", ")) + case field.TypeUUID: + t = "char(36) binary" + case field.TypeOther: + t = c.typ + default: + panic(fmt.Sprintf("unsupported type %q for column %q", c.Type.String(), c.Name)) + } + return t +} + +// addColumn returns the DSL query for adding the given column to a table. +// The syntax/order is: datatype [Charset] [Unique|Increment] [Collation] [Nullable]. +func (d *MySQL) addColumn(c *Column) *sql.ColumnBuilder { + b := sql.Column(c.Name).Type(d.cType(c)).Attr(c.Attr) + c.unique(b) + if c.Increment { + b.Attr("AUTO_INCREMENT") + } + c.nullable(b) + c.defaultValue(b) + if c.Collation != "" { + b.Attr("COLLATE " + c.Collation) + } + if c.Type == field.TypeJSON { + // Manually add a `CHECK` clause for older versions of MariaDB for validating the + // JSON documents. This constraint is automatically included from version 10.4.3. + if version, ok := d.mariadb(); ok && compareVersions(version, "10.4.3") == -1 { + b.Check(func(b *sql.Builder) { + b.WriteString("JSON_VALID(").Ident(c.Name).WriteByte(')') + }) + } + } + return b +} + +// addIndex returns the querying for adding an index to MySQL. +func (d *MySQL) addIndex(i *Index, table string) *sql.IndexBuilder { + idx := sql.CreateIndex(i.Name).Table(table) + if i.Unique { + idx.Unique() + } + parts := indexParts(i) + for _, c := range i.Columns { + part, ok := parts[c.Name] + if !ok || part == 0 { + idx.Column(c.Name) + } else { + idx.Column(fmt.Sprintf("%s(%d)", idx.Builder.Quote(c.Name), part)) + } + } + return idx +} + +// dropIndex drops a MySQL index. 
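+// The statement is built by idx.DropBuilder and has the usual MySQL form,
+// roughly: DROP INDEX `user_name` ON `users` (index and table names here are
+// only examples).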
+func (d *MySQL) dropIndex(ctx context.Context, tx dialect.Tx, idx *Index, table string) error { + query, args := idx.DropBuilder(table).Query() + return tx.Exec(ctx, query, args, nil) +} + +// prepare runs preparation work that needs to be done to apply the change-set. +func (d *MySQL) prepare(ctx context.Context, tx dialect.Tx, change *changes, table string) error { + for _, idx := range change.index.drop { + switch n := len(idx.columns); { + case n == 0: + return fmt.Errorf("index %q has no columns", idx.Name) + case n > 1: + continue // not a foreign-key index. + } + var qr sql.Querier + Switch: + switch col, ok := change.dropColumn(idx.columns[0]); { + // If both the index and the column need to be dropped, the foreign-key + // constraint that is associated with them need to be dropped as well. + case ok: + names, err := d.fkNames(ctx, tx, table, col.Name) + if err != nil { + return err + } + if len(names) == 1 { + qr = sql.AlterTable(table).DropForeignKey(names[0]) + } + // If the uniqueness was dropped from a foreign-key column, + // create a "simple index" if no other index exist for it. + case !ok && idx.Unique && len(idx.Columns) > 0: + col := idx.Columns[0] + for _, idx2 := range col.indexes { + if idx2 != idx && len(idx2.columns) == 1 { + break Switch + } + } + names, err := d.fkNames(ctx, tx, table, col.Name) + if err != nil { + return err + } + if len(names) == 1 { + qr = sql.CreateIndex(names[0]).Table(table).Columns(col.Name) + } + } + if qr != nil { + query, args := qr.Query() + if err := tx.Exec(ctx, query, args, nil); err != nil { + return err + } + } + } + return nil +} + +// scanColumn scans the column information from MySQL column description. +func (d *MySQL) scanColumn(c *Column, rows *sql.Rows) error { + var ( + nullable sql.NullString + defaults sql.NullString + numericPrecision sql.NullInt64 + numericScale sql.NullInt64 + ) + if err := rows.Scan(&c.Name, &c.typ, &nullable, &c.Key, &defaults, &c.Attr, &sql.NullString{}, &sql.NullString{}, &numericPrecision, &numericScale); err != nil { + return fmt.Errorf("scanning column description: %w", err) + } + c.Unique = c.UniqueKey() + if nullable.Valid { + c.Nullable = nullable.String == "YES" + } + if c.typ == "" { + return fmt.Errorf("missing type information for column %q", c.Name) + } + parts, size, unsigned, err := parseColumn(c.typ) + if err != nil { + return err + } + switch parts[0] { + case "mediumint", "int": + c.Type = field.TypeInt32 + if unsigned { + c.Type = field.TypeUint32 + } + case "smallint": + c.Type = field.TypeInt16 + if unsigned { + c.Type = field.TypeUint16 + } + case "bigint": + c.Type = field.TypeInt64 + if unsigned { + c.Type = field.TypeUint64 + } + case "tinyint": + switch { + case size == 1: + c.Type = field.TypeBool + case unsigned: + c.Type = field.TypeUint8 + default: + c.Type = field.TypeInt8 + } + case "double", "float": + c.Type = field.TypeFloat64 + case "numeric", "decimal": + c.Type = field.TypeFloat64 + // If precision is specified then we should take that into account. + if numericPrecision.Valid { + schemaType := fmt.Sprintf("%s(%d,%d)", parts[0], numericPrecision.Int64, numericScale.Int64) + c.SchemaType = map[string]string{dialect.MySQL: schemaType} + } + case "time", "timestamp", "date", "datetime": + c.Type = field.TypeTime + // The mapping from schema defaults to database + // defaults is not supported for TypeTime fields. 
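+		// Resetting the scanned value keeps the ScanDefault call at the
+		// end of scanColumn from parsing it.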
+ defaults = sql.NullString{} + case "tinyblob": + c.Size = math.MaxUint8 + c.Type = field.TypeBytes + case "blob": + c.Size = math.MaxUint16 + c.Type = field.TypeBytes + case "mediumblob": + c.Size = 1<<24 - 1 + c.Type = field.TypeBytes + case "longblob": + c.Size = math.MaxUint32 + c.Type = field.TypeBytes + case "binary", "varbinary": + c.Type = field.TypeBytes + c.Size = size + case "varchar": + c.Type = field.TypeString + c.Size = size + case "text": + c.Size = math.MaxUint16 + c.Type = field.TypeString + case "mediumtext": + c.Size = 1<<24 - 1 + c.Type = field.TypeString + case "longtext": + c.Size = math.MaxInt32 + c.Type = field.TypeString + case "json": + c.Type = field.TypeJSON + case "enum": + c.Type = field.TypeEnum + // Parse the enum values according to the MySQL format. + // github.com/mysql/mysql-server/blob/8.0/sql/field.cc#Field_enum::sql_type + values := strings.TrimSuffix(strings.TrimPrefix(c.typ, "enum("), ")") + if values == "" { + return fmt.Errorf("mysql: unexpected enum type: %q", c.typ) + } + parts := strings.Split(values, "','") + for i := range parts { + c.Enums = append(c.Enums, strings.Trim(parts[i], "'")) + } + case "char": + c.Type = field.TypeOther + // UUID field has length of 36 characters (32 alphanumeric characters and 4 hyphens). + if size == 36 { + c.Type = field.TypeUUID + } + case "point", "geometry", "linestring", "polygon": + c.Type = field.TypeOther + default: + return fmt.Errorf("unknown column type %q for version %q", parts[0], d.version) + } + if defaults.Valid { + return c.ScanDefault(defaults.String) + } + return nil +} + +// scanIndexes scans sql.Rows into an Indexes list. The query for returning the rows, +// should return the following 5 columns: INDEX_NAME, COLUMN_NAME, SUB_PART, NON_UNIQUE, +// SEQ_IN_INDEX. SEQ_IN_INDEX specifies the position of the column in the index columns. +func (d *MySQL) scanIndexes(rows *sql.Rows, t *Table) (Indexes, error) { + var ( + i Indexes + names = make(map[string]*Index) + ) + for rows.Next() { + var ( + name string + column string + nonuniq bool + seqindex int + subpart sql.NullInt64 + ) + if err := rows.Scan(&name, &column, &subpart, &nonuniq, &seqindex); err != nil { + return nil, fmt.Errorf("scanning index description: %w", err) + } + // Skip primary keys. + if name == "PRIMARY" { + c, ok := t.column(column) + if !ok { + return nil, fmt.Errorf("missing primary-key column: %q", column) + } + t.PrimaryKey = append(t.PrimaryKey, c) + continue + } + idx, ok := names[name] + if !ok { + idx = &Index{Name: name, Unique: !nonuniq, Annotation: &entsql.IndexAnnotation{}} + i = append(i, idx) + names[name] = idx + } + idx.columns = append(idx.columns, column) + if subpart.Int64 > 0 { + if idx.Annotation.PrefixColumns == nil { + idx.Annotation.PrefixColumns = make(map[string]uint) + } + idx.Annotation.PrefixColumns[column] = uint(subpart.Int64) + } + } + if err := rows.Err(); err != nil { + return nil, err + } + return i, nil +} + +// isImplicitIndex reports if the index was created implicitly for the unique column. +func (d *MySQL) isImplicitIndex(idx *Index, col *Column) bool { + // We execute `CHANGE COLUMN` on older versions of MySQL (<8.0), which + // auto create the new index. The old one, will be dropped in `changeSet`. + if compareVersions(d.version, "8.0.0") >= 0 { + return idx.Name == col.Name && col.Unique + } + return false +} + +// renameColumn returns the statement for renaming a column in +// MySQL based on its version. 
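+// MySQL >= 8 supports RENAME COLUMN directly; older versions fall back to
+// CHANGE COLUMN, which restates the full column definition, e.g.:
+//
+//	ALTER TABLE `users` RENAME COLUMN `old` TO `new`           -- >= 8.0
+//	ALTER TABLE `users` CHANGE COLUMN `old` `new` varchar(255) -- <  8.0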
+func (d *MySQL) renameColumn(t *Table, old, new *Column) sql.Querier {
+	q := sql.AlterTable(t.Name)
+	if compareVersions(d.version, "8.0.0") >= 0 {
+		return q.RenameColumn(old.Name, new.Name)
+	}
+	return q.ChangeColumn(old.Name, d.addColumn(new))
+}
+
+// renameIndex returns the statement for renaming an index.
+func (d *MySQL) renameIndex(t *Table, old, new *Index) sql.Querier {
+	q := sql.AlterTable(t.Name)
+	if compareVersions(d.version, "5.7.0") >= 0 {
+		return q.RenameIndex(old.Name, new.Name)
+	}
+	return q.DropIndex(old.Name).AddIndex(new.Builder(t.Name))
+}
+
+// matchSchema returns the predicate for matching table schema.
+func (d *MySQL) matchSchema(columns ...string) *sql.Predicate {
+	column := "TABLE_SCHEMA"
+	if len(columns) > 0 {
+		column = columns[0]
+	}
+	if d.schema != "" {
+		return sql.EQ(column, d.schema)
+	}
+	return sql.EQ(column, sql.Raw("(SELECT DATABASE())"))
+}
+
+// tables returns the query for getting the tables in the schema.
+func (d *MySQL) tables() sql.Querier {
+	return sql.Select("TABLE_NAME").
+		From(sql.Table("TABLES").Schema("INFORMATION_SCHEMA")).
+		Where(d.matchSchema())
+}
+
+// alterColumns returns the queries for applying the columns change-set.
+func (d *MySQL) alterColumns(table string, add, modify, drop []*Column) sql.Queries {
+	b := sql.Dialect(dialect.MySQL).AlterTable(table)
+	for _, c := range add {
+		b.AddColumn(d.addColumn(c))
+	}
+	for _, c := range modify {
+		b.ModifyColumn(d.addColumn(c))
+	}
+	for _, c := range drop {
+		b.DropColumn(sql.Dialect(dialect.MySQL).Column(c.Name))
+	}
+	if len(b.Queries) == 0 {
+		return nil
+	}
+	return sql.Queries{b}
+}
+
+// normalizeJSON normalizes MariaDB longtext columns to type JSON.
+func (d *MySQL) normalizeJSON(ctx context.Context, tx dialect.Tx, t *Table) error {
+	columns := make(map[string]*Column)
+	for _, c := range t.Columns {
+		if c.typ == "longtext" {
+			columns[c.Name] = c
+		}
+	}
+	if len(columns) == 0 {
+		return nil
+	}
+	rows := &sql.Rows{}
+	query, args := sql.Select("CONSTRAINT_NAME").
+		From(sql.Table("CHECK_CONSTRAINTS").Schema("INFORMATION_SCHEMA")).
+		Where(sql.And(
+			d.matchSchema("CONSTRAINT_SCHEMA"),
+			sql.EQ("TABLE_NAME", t.Name),
+			sql.Like("CHECK_CLAUSE", "json_valid(%)"),
+		)).
+		Query()
+	if err := tx.Query(ctx, query, args, rows); err != nil {
+		return fmt.Errorf("mysql: query table constraints %w", err)
+	}
+	// Call Close in cases of failures (Close is idempotent).
+	defer rows.Close()
+	names := make([]string, 0, len(columns))
+	if err := sql.ScanSlice(rows, &names); err != nil {
+		return fmt.Errorf("mysql: scan table constraints: %w", err)
+	}
+	if err := rows.Err(); err != nil {
+		return err
+	}
+	if err := rows.Close(); err != nil {
+		return err
+	}
+	for _, name := range names {
+		c, ok := columns[name]
+		if ok {
+			c.Type = field.TypeJSON
+		}
+	}
+	return nil
+}
+
+// mariadb reports if the migration runs on MariaDB and returns the semver string.
+func (d *MySQL) mariadb() (string, bool) {
+	idx := strings.Index(d.version, "MariaDB")
+	if idx == -1 {
+		return "", false
+	}
+	return d.version[:idx-1], true
+}
+
+// parseColumn returns column parts, size and signed-info from a MySQL type.
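+// For example, "int(10) unsigned" yields parts=["int", "10", "unsigned"],
+// size=10 and unsigned=true, while "varchar(255)" yields
+// parts=["varchar", "255"] and size=255.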
+func parseColumn(typ string) (parts []string, size int64, unsigned bool, err error) { + switch parts = strings.FieldsFunc(typ, func(r rune) bool { + return r == '(' || r == ')' || r == ' ' || r == ',' + }); parts[0] { + case "tinyint", "smallint", "mediumint", "int", "bigint": + switch { + case len(parts) == 2 && parts[1] == "unsigned": // int unsigned + unsigned = true + case len(parts) == 3: // int(10) unsigned + unsigned = true + fallthrough + case len(parts) == 2: // int(10) + size, err = strconv.ParseInt(parts[1], 10, 0) + } + case "varbinary", "varchar", "char", "binary": + if len(parts) > 1 { + size, err = strconv.ParseInt(parts[1], 10, 64) + } + } + if err != nil { + return parts, size, unsigned, fmt.Errorf("converting %s size to int: %w", parts[0], err) + } + return parts, size, unsigned, nil +} + +// fkNames returns the foreign-key names of a column. +func (d *MySQL) fkNames(ctx context.Context, tx dialect.Tx, table, column string) ([]string, error) { + query, args := sql.Select("CONSTRAINT_NAME").From(sql.Table("KEY_COLUMN_USAGE").Schema("INFORMATION_SCHEMA")). + Where(sql.And( + sql.EQ("TABLE_NAME", table), + sql.EQ("COLUMN_NAME", column), + // NULL for unique and primary-key constraints. + sql.NotNull("POSITION_IN_UNIQUE_CONSTRAINT"), + d.matchSchema(), + )). + Query() + var ( + names []string + rows = &sql.Rows{} + ) + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("mysql: reading constraint names %w", err) + } + defer rows.Close() + if err := sql.ScanSlice(rows, &names); err != nil { + return nil, err + } + return names, nil +} + +// defaultSize returns the default size for MySQL/MariaDB varchar type +// based on column size, charset and table indexes, in order to avoid +// index prefix key limit (767) for older versions of MySQL/MariaDB. +func (d *MySQL) defaultSize(c *Column) int64 { + size := DefaultStringLen + version, checked := d.version, "5.7.0" + if v, ok := d.mariadb(); ok { + version, checked = v, "10.2.2" + } + switch { + // Version is >= 5.7 for MySQL, or >= 10.2.2 for MariaDB. + case compareVersions(version, checked) != -1: + // Column is non-unique, or not part of any index (reaching + // the error 1071). + case !c.Unique && len(c.indexes) == 0 && !c.PrimaryKey(): + default: + size = 191 + } + return size +} + +// needsConversion reports if column "old" needs to be converted +// (by table altering) to column "new". +func (d *MySQL) needsConversion(old, new *Column) bool { + return d.cType(old) != d.cType(new) +} + +// indexModified used by the migration differ to check if the index was modified. +func (d *MySQL) indexModified(old, new *Index) bool { + oldParts, newParts := indexParts(old), indexParts(new) + if len(oldParts) != len(newParts) { + return true + } + for column, oldPart := range oldParts { + newPart, ok := newParts[column] + if !ok || oldPart != newPart { + return true + } + } + return false +} + +// indexParts returns a map holding the sub_part mapping if exists. +func indexParts(idx *Index) map[string]uint { + parts := make(map[string]uint) + if idx.Annotation == nil { + return parts + } + // If prefix (without a name) was defined on the + // annotation, map it to the single column index. + if idx.Annotation.Prefix > 0 && len(idx.Columns) == 1 { + parts[idx.Columns[0].Name] = idx.Annotation.Prefix + } + for column, part := range idx.Annotation.PrefixColumns { + parts[column] = part + } + return parts +} + +// Atlas integration. 
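+//
+// The methods below adapt the ent schema above to Atlas types: atOpen opens an
+// Atlas migrate.Driver over the same connection, while atTable, atTypeC and
+// atIndex translate tables, column types and index parts. This translation is
+// what the Atlas-based engine diffs and plans migrations from.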
+ +func (d *MySQL) atOpen(conn dialect.ExecQuerier) (migrate.Driver, error) { + return mysql.Open(&db{ExecQuerier: conn}) +} + +func (d *MySQL) atTable(t1 *Table, t2 *schema.Table) { + t2.SetCharset("utf8mb4").SetCollation("utf8mb4_bin") + if t1.Annotation == nil { + return + } + if charset := t1.Annotation.Charset; charset != "" { + t2.SetCharset(charset) + } + if collate := t1.Annotation.Collation; collate != "" { + t2.SetCollation(collate) + } + if opts := t1.Annotation.Options; opts != "" { + t2.AddAttrs(&mysql.CreateOptions{ + V: opts, + }) + } + // Check if the connected database supports the CHECK clause. + // For MySQL, is >= "8.0.16" and for MariaDB it is "10.2.1". + v1, v2 := d.version, "8.0.16" + if v, ok := d.mariadb(); ok { + v1, v2 = v, "10.2.1" + } + if compareVersions(v1, v2) >= 0 { + setAtChecks(t1, t2) + } +} + +func (d *MySQL) supportsDefault(c *Column) bool { + _, maria := d.mariadb() + switch c.Default.(type) { + case Expr, map[string]Expr: + if maria { + return compareVersions(d.version, "10.2.0") >= 0 + } + return c.supportDefault() && compareVersions(d.version, "8.0.0") >= 0 + default: + return c.supportDefault() || maria + } +} + +func (d *MySQL) atTypeC(c1 *Column, c2 *schema.Column) error { + if c1.SchemaType != nil && c1.SchemaType[dialect.MySQL] != "" { + t, err := mysql.ParseType(strings.ToLower(c1.SchemaType[dialect.MySQL])) + if err != nil { + return err + } + c2.Type.Type = t + return nil + } + var t schema.Type + switch c1.Type { + case field.TypeBool: + t = &schema.BoolType{T: "boolean"} + case field.TypeInt8: + t = &schema.IntegerType{T: mysql.TypeTinyInt} + case field.TypeUint8: + t = &schema.IntegerType{T: mysql.TypeTinyInt, Unsigned: true} + case field.TypeInt16: + t = &schema.IntegerType{T: mysql.TypeSmallInt} + case field.TypeUint16: + t = &schema.IntegerType{T: mysql.TypeSmallInt, Unsigned: true} + case field.TypeInt32: + t = &schema.IntegerType{T: mysql.TypeInt} + case field.TypeUint32: + t = &schema.IntegerType{T: mysql.TypeInt, Unsigned: true} + case field.TypeInt, field.TypeInt64: + t = &schema.IntegerType{T: mysql.TypeBigInt} + case field.TypeUint, field.TypeUint64: + t = &schema.IntegerType{T: mysql.TypeBigInt, Unsigned: true} + case field.TypeBytes: + size := int64(math.MaxUint16) + if c1.Size > 0 { + size = c1.Size + } + switch { + case size <= math.MaxUint8: + t = &schema.BinaryType{T: mysql.TypeTinyBlob} + case size <= math.MaxUint16: + t = &schema.BinaryType{T: mysql.TypeBlob} + case size < 1<<24: + t = &schema.BinaryType{T: mysql.TypeMediumBlob} + case size <= math.MaxUint32: + t = &schema.BinaryType{T: mysql.TypeLongBlob} + } + case field.TypeJSON: + t = &schema.JSONType{T: mysql.TypeJSON} + if compareVersions(d.version, "5.7.8") == -1 { + t = &schema.BinaryType{T: mysql.TypeLongBlob} + } + case field.TypeString: + size := c1.Size + if size == 0 { + size = d.defaultSize(c1) + } + switch { + case c1.typ == "tinytext", c1.typ == "text": + t = &schema.StringType{T: c1.typ} + case size <= math.MaxUint16: + t = &schema.StringType{T: mysql.TypeVarchar, Size: int(size)} + case size == 1<<24-1: + t = &schema.StringType{T: mysql.TypeMediumText} + default: + t = &schema.StringType{T: mysql.TypeLongText} + } + case field.TypeFloat32, field.TypeFloat64: + t = &schema.FloatType{T: c1.scanTypeOr(mysql.TypeDouble)} + case field.TypeTime: + t = &schema.TimeType{T: c1.scanTypeOr(mysql.TypeTimestamp)} + // In MariaDB or in MySQL < v8.0.2, the TIMESTAMP column has both `DEFAULT CURRENT_TIMESTAMP` + // and `ON UPDATE CURRENT_TIMESTAMP` if neither is 
specified explicitly. This behavior is
+		// suppressed if the column is defined with a `DEFAULT` clause or with the `NULL` attribute.
+		if _, maria := d.mariadb(); maria || compareVersions(d.version, "8.0.2") == -1 && c1.Default == nil {
+			c2.SetNull(c1.Attr == "")
+		}
+	case field.TypeEnum:
+		t = &schema.EnumType{T: mysql.TypeEnum, Values: c1.Enums}
+	case field.TypeUUID:
+		// "CHAR(X) BINARY" is treated as "CHAR(X) COLLATE latin1_bin" in MySQL < 8,
+		// and as "CHAR(X) COLLATE utf8mb4_bin" in MySQL >= 8. However, we already set
+		// the table collation to utf8mb4_bin, and the column collation is set explicitly below.
+		t = &schema.StringType{T: mysql.TypeChar, Size: 36}
+		c2.SetCollation("utf8mb4_bin")
+	default:
+		t, err := mysql.ParseType(strings.ToLower(c1.typ))
+		if err != nil {
+			return err
+		}
+		c2.Type.Type = t
+		// Return early, so the shadowed "t" above is not
+		// overwritten by the (nil) outer "t" below.
+		return nil
+	}
+	c2.Type.Type = t
+	return nil
+}
+
+func (d *MySQL) atUniqueC(t1 *Table, c1 *Column, t2 *schema.Table, c2 *schema.Column) {
+	// For UNIQUE columns, MySQL creates an implicit index
+	// named as the column, with an extra suffix in case the
+	// name is already taken (e.g. name, name_2, name_3, ...).
+	for _, idx := range t1.Indexes {
+		// The index is also defined explicitly, and will be added in atIndexes.
+		if idx.Unique && d.atImplicitIndexName(idx, c1) {
+			return
+		}
+	}
+	t2.AddIndexes(schema.NewUniqueIndex(c1.Name).AddColumns(c2))
+}
+
+func (d *MySQL) atIncrementC(t *schema.Table, c *schema.Column) {
+	if c.Default != nil {
+		t.Attrs = removeAttr(t.Attrs, reflect.TypeOf(&mysql.AutoIncrement{}))
+	} else {
+		c.AddAttrs(&mysql.AutoIncrement{})
+	}
+}
+
+func (d *MySQL) atIncrementT(t *schema.Table, v int64) {
+	t.AddAttrs(&mysql.AutoIncrement{V: v})
+}
+
+func (d *MySQL) atImplicitIndexName(idx *Index, c1 *Column) bool {
+	if idx.Name == c1.Name {
+		return true
+	}
+	if !strings.HasPrefix(idx.Name, c1.Name+"_") {
+		return false
+	}
+	i, err := strconv.ParseInt(strings.TrimLeft(idx.Name, c1.Name+"_"), 10, 64)
+	return err == nil && i > 1
+}
+
+func (d *MySQL) atIndex(idx1 *Index, t2 *schema.Table, idx2 *schema.Index) error {
+	prefix := indexParts(idx1)
+	for _, c1 := range idx1.Columns {
+		c2, ok := t2.Column(c1.Name)
+		if !ok {
+			return fmt.Errorf("unexpected index %q column: %q", idx1.Name, c1.Name)
+		}
+		part := &schema.IndexPart{C: c2}
+		if v, ok := prefix[c1.Name]; ok {
+			part.AddAttrs(&mysql.SubPart{Len: int(v)})
+		}
+		idx2.AddParts(part)
+	}
+	if t, ok := indexType(idx1, dialect.MySQL); ok {
+		idx2.AddAttrs(&mysql.IndexType{T: t})
+	}
+	return nil
+}
+
+func indexType(idx *Index, d string) (string, bool) {
+	ant := idx.Annotation
+	if ant == nil {
+		return "", false
+	}
+	if ant.Types != nil && ant.Types[d] != "" {
+		return ant.Types[d], true
+	}
+	if ant.Type != "" {
+		return ant.Type, true
+	}
+	return "", false
+}
+
+func (MySQL) atTypeRangeSQL(ts ...string) string {
+	for i := range ts {
+		ts[i] = fmt.Sprintf("('%s')", ts[i])
+	}
+	return fmt.Sprintf("INSERT INTO `%s` (`type`) VALUES %s", TypeTable, strings.Join(ts, ", "))
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/schema/postgres.go b/vendor/entgo.io/ent/dialect/sql/schema/postgres.go
new file mode 100644
index 00000000..b778b2e5
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/schema/postgres.go
@@ -0,0 +1,851 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package schema
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/schema/field"
+
+	"ariga.io/atlas/sql/migrate"
+	"ariga.io/atlas/sql/postgres"
+	"ariga.io/atlas/sql/schema"
+)
+
+// Postgres is a postgres migration driver.
+type Postgres struct {
+	dialect.Driver
+	schema  string
+	version string
+}
+
+// init loads the Postgres version from the database for later use in the migration process.
+// It returns an error if the server version is lower than v10.
+func (d *Postgres) init(ctx context.Context) error {
+	rows := &sql.Rows{}
+	if err := d.Query(ctx, "SHOW server_version_num", []any{}, rows); err != nil {
+		return fmt.Errorf("querying server version %w", err)
+	}
+	defer rows.Close()
+	if !rows.Next() {
+		if err := rows.Err(); err != nil {
+			return err
+		}
+		return fmt.Errorf("server_version_num variable was not found")
+	}
+	var version string
+	if err := rows.Scan(&version); err != nil {
+		return fmt.Errorf("scanning version: %w", err)
+	}
+	if len(version) < 6 {
+		return fmt.Errorf("malformed version: %s", version)
+	}
+	d.version = fmt.Sprintf("%s.%s.%s", version[:2], version[2:4], version[4:])
+	if compareVersions(d.version, "10.0.0") == -1 {
+		return fmt.Errorf("unsupported postgres version: %s", d.version)
+	}
+	return nil
+}
+
+// tableExist checks if a table exists in the database and current schema.
+func (d *Postgres) tableExist(ctx context.Context, conn dialect.ExecQuerier, name string) (bool, error) {
+	query, args := sql.Dialect(dialect.Postgres).
+		Select(sql.Count("*")).From(sql.Table("tables").Schema("information_schema")).
+		Where(sql.And(
+			d.matchSchema(),
+			sql.EQ("table_name", name),
+		)).Query()
+	return exist(ctx, conn, query, args...)
+}
+
+// fkExist checks if a foreign-key exists in the current schema.
+func (d *Postgres) fkExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) {
+	query, args := sql.Dialect(dialect.Postgres).
+		Select(sql.Count("*")).From(sql.Table("table_constraints").Schema("information_schema")).
+		Where(sql.And(
+			d.matchSchema(),
+			sql.EQ("constraint_type", "FOREIGN KEY"),
+			sql.EQ("constraint_name", name),
+		)).Query()
+	return exist(ctx, tx, query, args...)
+}
+
+// setRange restarts the identity column at the given offset. Used by the universal-id option.
+func (d *Postgres) setRange(ctx context.Context, conn dialect.ExecQuerier, t *Table, value int64) error {
+	if value == 0 {
+		value = 1 // RESTART value cannot be < 1.
+	}
+	pk := "id"
+	if len(t.PrimaryKey) == 1 {
+		pk = t.PrimaryKey[0].Name
+	}
+	return conn.Exec(ctx, fmt.Sprintf("ALTER TABLE %q ALTER COLUMN %q RESTART WITH %d", t.Name, pk, value), []any{}, nil)
+}
+
+// table loads the current table description from the database.
+func (d *Postgres) table(ctx context.Context, tx dialect.Tx, name string) (*Table, error) {
+	rows := &sql.Rows{}
+	query, args := sql.Dialect(dialect.Postgres).
+		Select(
+			"column_name", "data_type", "is_nullable", "column_default", "udt_name",
+			"numeric_precision", "numeric_scale", "character_maximum_length",
+		).
+		From(sql.Table("columns").Schema("information_schema")).
+		Where(sql.And(
+			d.matchSchema(),
+			sql.EQ("table_name", name),
+		)).Query()
+	if err := tx.Query(ctx, query, args, rows); err != nil {
+		return nil, fmt.Errorf("postgres: reading table description %w", err)
+	}
+	// Call `Close` in cases of failures (`Close` is idempotent).
+ defer rows.Close() + t := NewTable(name) + for rows.Next() { + c := &Column{} + if err := d.scanColumn(c, rows); err != nil { + return nil, err + } + t.AddColumn(c) + } + if err := rows.Err(); err != nil { + return nil, err + } + if err := rows.Close(); err != nil { + return nil, fmt.Errorf("closing rows %w", err) + } + idxs, err := d.indexes(ctx, tx, name) + if err != nil { + return nil, err + } + // Populate the index information to the table and its columns. + // We do it manually, because PK and uniqueness information does + // not exist when querying the information_schema.COLUMNS above. + for _, idx := range idxs { + switch { + case idx.primary: + for _, name := range idx.columns { + c, ok := t.column(name) + if !ok { + return nil, fmt.Errorf("index %q column %q was not found in table %q", idx.Name, name, t.Name) + } + c.Key = PrimaryKey + t.PrimaryKey = append(t.PrimaryKey, c) + } + case idx.Unique && len(idx.columns) == 1: + name := idx.columns[0] + c, ok := t.column(name) + if !ok { + return nil, fmt.Errorf("index %q column %q was not found in table %q", idx.Name, name, t.Name) + } + c.Key = UniqueKey + c.Unique = true + fallthrough + default: + t.addIndex(idx) + } + } + return t, nil +} + +// indexesQuery holds a query format for retrieving +// table indexes of the current schema. +const indexesQuery = ` +SELECT i.relname AS index_name, + a.attname AS column_name, + idx.indisprimary AS primary, + idx.indisunique AS unique, + array_position(idx.indkey, a.attnum) as seq_in_index +FROM pg_class t, + pg_class i, + pg_index idx, + pg_attribute a, + pg_namespace n +WHERE t.oid = idx.indrelid + AND i.oid = idx.indexrelid + AND n.oid = t.relnamespace + AND a.attrelid = t.oid + AND a.attnum = ANY(idx.indkey) + AND t.relkind = 'r' + AND n.nspname = %s + AND t.relname = '%s' +ORDER BY index_name, seq_in_index; +` + +// indexesQuery returns the query (and its placeholders) for getting table indexes. +func (d *Postgres) indexesQuery(table string) (string, []any) { + if d.schema != "" { + return fmt.Sprintf(indexesQuery, "$1", table), []any{d.schema} + } + return fmt.Sprintf(indexesQuery, "CURRENT_SCHEMA()", table), nil +} + +func (d *Postgres) indexes(ctx context.Context, tx dialect.Tx, table string) (Indexes, error) { + rows := &sql.Rows{} + query, args := d.indexesQuery(table) + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("querying indexes for table %s: %w", table, err) + } + defer rows.Close() + var ( + idxs Indexes + names = make(map[string]*Index) + ) + for rows.Next() { + var ( + seqindex int + name, column string + unique, primary bool + ) + if err := rows.Scan(&name, &column, &primary, &unique, &seqindex); err != nil { + return nil, fmt.Errorf("scanning index description: %w", err) + } + // If the index is prefixed with the table, it may was added by + // `addIndex` and it should be trimmed. But, since entc prefixes + // all indexes with schema-type, for uncountable types (like, media + // or equipment) this isn't correct, and we fallback for the real-name. + short := strings.TrimPrefix(name, table+"_") + idx, ok := names[short] + if !ok { + idx = &Index{Name: short, Unique: unique, primary: primary, realname: name} + idxs = append(idxs, idx) + names[short] = idx + } + idx.columns = append(idx.columns, column) + } + if err := rows.Err(); err != nil { + return nil, err + } + return idxs, nil +} + +// maxCharSize defines the maximum size of limited character types in Postgres (10 MB). 
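+// Strings larger than this are mapped to the unbounded "text" type by cType
+// below, and "text" columns are read back with Size set to maxCharSize + 1.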
+const maxCharSize = 10 << 20 + +// scanColumn scans the information a column from column description. +func (d *Postgres) scanColumn(c *Column, rows *sql.Rows) error { + var ( + nullable sql.NullString + defaults sql.NullString + udt sql.NullString + numericPrecision sql.NullInt64 + numericScale sql.NullInt64 + characterMaximumLen sql.NullInt64 + ) + if err := rows.Scan(&c.Name, &c.typ, &nullable, &defaults, &udt, &numericPrecision, &numericScale, &characterMaximumLen); err != nil { + return fmt.Errorf("scanning column description: %w", err) + } + if nullable.Valid { + c.Nullable = nullable.String == "YES" + } + switch c.typ { + case "boolean": + c.Type = field.TypeBool + case "smallint": + c.Type = field.TypeInt16 + case "integer": + c.Type = field.TypeInt32 + case "bigint": + c.Type = field.TypeInt64 + case "real": + c.Type = field.TypeFloat32 + case "double precision": + c.Type = field.TypeFloat64 + case "numeric", "decimal": + c.Type = field.TypeFloat64 + // If precision is specified then we should take that into account. + if numericPrecision.Valid { + schemaType := fmt.Sprintf("%s(%d,%d)", c.typ, numericPrecision.Int64, numericScale.Int64) + c.SchemaType = map[string]string{dialect.Postgres: schemaType} + } + case "text": + c.Type = field.TypeString + c.Size = maxCharSize + 1 + case "character", "character varying": + c.Type = field.TypeString + // If character maximum length is specified then we should take that into account. + if characterMaximumLen.Valid { + schemaType := fmt.Sprintf("varchar(%d)", characterMaximumLen.Int64) + c.SchemaType = map[string]string{dialect.Postgres: schemaType} + } + case "date", "time with time zone", "time without time zone", "timestamp with time zone", "timestamp without time zone": + c.Type = field.TypeTime + case "bytea": + c.Type = field.TypeBytes + case "jsonb": + c.Type = field.TypeJSON + case "uuid": + c.Type = field.TypeUUID + case "cidr", "inet", "macaddr", "macaddr8": + c.Type = field.TypeOther + case "point", "line", "lseg", "box", "path", "polygon", "circle": + c.Type = field.TypeOther + case "ARRAY": + c.Type = field.TypeOther + if !udt.Valid { + return fmt.Errorf("missing array type for column %q", c.Name) + } + // Note that for ARRAY types, the 'udt_name' column holds the array type + // prefixed with '_'. For example, for 'integer[]' the result is '_int', + // and for 'text[N][M]' the result is also '_text'. That's because, the + // database ignores any size or multi-dimensions constraints. + c.SchemaType = map[string]string{dialect.Postgres: "ARRAY"} + c.typ = udt.String + case "USER-DEFINED", "tstzrange", "interval": + c.Type = field.TypeOther + if !udt.Valid { + return fmt.Errorf("missing user defined type for column %q", c.Name) + } + c.SchemaType = map[string]string{dialect.Postgres: udt.String} + } + switch { + case !defaults.Valid || c.Type == field.TypeTime || callExpr(defaults.String): + return nil + case strings.Contains(defaults.String, "::"): + parts := strings.Split(defaults.String, "::") + defaults.String = strings.Trim(parts[0], "'") + fallthrough + default: + return c.ScanDefault(defaults.String) + } +} + +// tBuilder returns the TableBuilder for the given table. +func (d *Postgres) tBuilder(t *Table) *sql.TableBuilder { + b := sql.Dialect(dialect.Postgres). 
+ CreateTable(t.Name).IfNotExists() + for _, c := range t.Columns { + b.Column(d.addColumn(c)) + } + for _, pk := range t.PrimaryKey { + b.PrimaryKey(pk.Name) + } + if t.Annotation != nil { + addChecks(b, t.Annotation) + } + return b +} + +// cType returns the PostgreSQL string type for this column. +func (d *Postgres) cType(c *Column) (t string) { + if c.SchemaType != nil && c.SchemaType[dialect.Postgres] != "" { + return c.SchemaType[dialect.Postgres] + } + switch c.Type { + case field.TypeBool: + t = "boolean" + case field.TypeUint8, field.TypeInt8, field.TypeInt16, field.TypeUint16: + t = "smallint" + case field.TypeInt32, field.TypeUint32: + t = "int" + case field.TypeInt, field.TypeUint, field.TypeInt64, field.TypeUint64: + t = "bigint" + case field.TypeFloat32: + t = c.scanTypeOr("real") + case field.TypeFloat64: + t = c.scanTypeOr("double precision") + case field.TypeBytes: + t = "bytea" + case field.TypeJSON: + t = "jsonb" + case field.TypeUUID: + t = "uuid" + case field.TypeString: + t = "varchar" + if c.Size > maxCharSize { + t = "text" + } + case field.TypeTime: + t = c.scanTypeOr("timestamp with time zone") + case field.TypeEnum: + // Currently, the support for enums is weak (application level only. + // like SQLite). Dialect needs to create and maintain its enum type. + t = "varchar" + case field.TypeOther: + t = c.typ + default: + panic(fmt.Sprintf("unsupported type %q for column %q", c.Type.String(), c.Name)) + } + return t +} + +// addColumn returns the ColumnBuilder for adding the given column to a table. +func (d *Postgres) addColumn(c *Column) *sql.ColumnBuilder { + b := sql.Dialect(dialect.Postgres). + Column(c.Name).Type(d.cType(c)).Attr(c.Attr) + c.unique(b) + if c.Increment { + b.Attr("GENERATED BY DEFAULT AS IDENTITY") + } + c.nullable(b) + d.writeDefault(b, c, "DEFAULT") + if c.Collation != "" { + b.Attr("COLLATE " + strconv.Quote(c.Collation)) + } + return b +} + +// writeDefault writes the `DEFAULT` clause to column builder +// if exists and supported by the driver. +func (d *Postgres) writeDefault(b *sql.ColumnBuilder, c *Column, clause string) { + if c.Default == nil || !c.supportDefault() { + return + } + attr := fmt.Sprint(c.Default) + switch v := c.Default.(type) { + case bool: + attr = strconv.FormatBool(v) + case string: + if t := c.Type; t != field.TypeUUID && t != field.TypeTime && !t.Numeric() { + // Escape single quote by replacing each with 2. + attr = fmt.Sprintf("'%s'", strings.ReplaceAll(v, "'", "''")) + } + } + b.Attr(clause + " " + attr) +} + +// alterColumn returns list of ColumnBuilder for applying in order to alter a column. +func (d *Postgres) alterColumn(c *Column) (ops []*sql.ColumnBuilder) { + b := sql.Dialect(dialect.Postgres) + ops = append(ops, b.Column(c.Name).Type(d.cType(c))) + if c.Nullable { + ops = append(ops, b.Column(c.Name).Attr("DROP NOT NULL")) + } else { + ops = append(ops, b.Column(c.Name).Attr("SET NOT NULL")) + } + if c.Default != nil && c.supportDefault() { + ops = append(ops, d.writeSetDefault(b.Column(c.Name), c)) + } + return ops +} + +func (d *Postgres) writeSetDefault(b *sql.ColumnBuilder, c *Column) *sql.ColumnBuilder { + d.writeDefault(b, c, "SET DEFAULT") + return b +} + +// hasUniqueName reports if the index has a unique name in the schema. +func hasUniqueName(i *Index) bool { + // Trim the "_key" suffix if it was added by Postgres for implicit indexes. 
+ name := strings.TrimSuffix(i.Name, "_key")
+ suffix := strings.Join(i.columnNames(), "_")
+ if !strings.HasSuffix(name, suffix) {
+ return true // Assume it has a custom storage-key.
+ }
+ // The codegen prefixes indexes with the type name by default.
+ // For example, an index "users"("name") will be named "user_name".
+ return name != suffix
+}
+
+// addIndex returns the query for adding an index to PostgreSQL.
+func (d *Postgres) addIndex(i *Index, table string) *sql.IndexBuilder {
+ name := i.Name
+ if !hasUniqueName(i) {
+ // Since index names should be unique in pg_class per schema,
+ // we prefix it with the table name and remove it on read.
+ name = fmt.Sprintf("%s_%s", table, i.Name)
+ }
+ idx := sql.Dialect(dialect.Postgres).
+ CreateIndex(name).IfNotExists().Table(table)
+ if i.Unique {
+ idx.Unique()
+ }
+ for _, c := range i.Columns {
+ idx.Column(c.Name)
+ }
+ return idx
+}
+
+// dropIndex drops a Postgres index.
+func (d *Postgres) dropIndex(ctx context.Context, tx dialect.Tx, idx *Index, table string) error {
+ name := idx.Name
+ build := sql.Dialect(dialect.Postgres)
+ if prefix := table + "_"; !strings.HasPrefix(name, prefix) && !hasUniqueName(idx) {
+ name = prefix + name
+ }
+ query, args := sql.Dialect(dialect.Postgres).
+ Select(sql.Count("*")).From(sql.Table("table_constraints").Schema("information_schema")).
+ Where(sql.And(
+ d.matchSchema(),
+ sql.EQ("constraint_type", "UNIQUE"),
+ sql.EQ("constraint_name", name),
+ )).
+ Query()
+ exists, err := exist(ctx, tx, query, args...)
+ if err != nil {
+ return err
+ }
+ query, args = build.DropIndex(name).Query()
+ if exists {
+ query, args = build.AlterTable(table).DropConstraint(name).Query()
+ }
+ return tx.Exec(ctx, query, args, nil)
+}
+
+// isImplicitIndex reports if the index was created implicitly for the unique column.
+func (d *Postgres) isImplicitIndex(idx *Index, col *Column) bool {
+ return strings.TrimSuffix(idx.Name, "_key") == col.Name && col.Unique
+}
+
+// renameColumn returns the statement for renaming a column.
+func (d *Postgres) renameColumn(t *Table, old, new *Column) sql.Querier {
+ return sql.Dialect(dialect.Postgres).
+ AlterTable(t.Name).
+ RenameColumn(old.Name, new.Name)
+}
+
+// renameIndex returns the statement for renaming an index.
+func (d *Postgres) renameIndex(t *Table, old, new *Index) sql.Querier {
+ if sfx := "_key"; strings.HasSuffix(old.Name, sfx) && !strings.HasSuffix(new.Name, sfx) {
+ new.Name += sfx
+ }
+ if pfx := t.Name + "_"; strings.HasPrefix(old.realname, pfx) && !strings.HasPrefix(new.Name, pfx) {
+ new.Name = pfx + new.Name
+ }
+ return sql.Dialect(dialect.Postgres).AlterIndex(old.realname).Rename(new.Name)
+}
+
+// matchSchema returns the predicate for matching the table schema.
+func (d *Postgres) matchSchema(columns ...string) *sql.Predicate {
+ column := "table_schema"
+ if len(columns) > 0 {
+ column = columns[0]
+ }
+ if d.schema != "" {
+ return sql.EQ(column, d.schema)
+ }
+ return sql.EQ(column, sql.Raw("CURRENT_SCHEMA()"))
+}
+
+// tables returns the query for getting the tables in the schema.
+func (d *Postgres) tables() sql.Querier {
+ return sql.Dialect(dialect.Postgres).
+ Select("table_name").
+ From(sql.Table("tables").Schema("information_schema")).
+ Where(d.matchSchema())
+}
+
+// alterColumns returns the queries for applying the columns change-set.
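+// As an illustrative sketch (not part of the vendored source), a change-set
+// that adds one column and drops another is rendered as a single statement:
+//
+//	ALTER TABLE "users" ADD COLUMN "nickname" varchar, DROP COLUMN "age"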
+func (d *Postgres) alterColumns(table string, add, modify, drop []*Column) sql.Queries { + b := sql.Dialect(dialect.Postgres).AlterTable(table) + for _, c := range add { + b.AddColumn(d.addColumn(c)) + } + for _, c := range modify { + b.ModifyColumns(d.alterColumn(c)...) + } + for _, c := range drop { + b.DropColumn(sql.Dialect(dialect.Postgres).Column(c.Name)) + } + if len(b.Queries) == 0 { + return nil + } + return sql.Queries{b} +} + +// needsConversion reports if column "old" needs to be converted +// (by table altering) to column "new". +func (d *Postgres) needsConversion(old, new *Column) bool { + oldT, newT := d.cType(old), d.cType(new) + return oldT != newT && (oldT != "ARRAY" || !arrayType(newT)) +} + +// callExpr reports if the given string ~looks like a function call expression. +func callExpr(s string) bool { + if parts := strings.Split(s, "::"); !strings.HasSuffix(s, ")") && strings.HasSuffix(parts[0], ")") { + s = parts[0] + } + i, j := strings.IndexByte(s, '('), strings.LastIndexByte(s, ')') + if i == -1 || i > j || j != len(s)-1 { + return false + } + for i, r := range s[:i] { + if !isAlpha(r, i > 0) { + return false + } + } + return true +} + +func isAlpha(r rune, digit bool) bool { + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' || digit && '0' <= r && r <= '9' +} + +// arrayType reports if the given string is an array type (e.g. int[], text[2]). +func arrayType(t string) bool { + i, j := strings.LastIndexByte(t, '['), strings.LastIndexByte(t, ']') + if i == -1 || j == -1 { + return false + } + for _, r := range t[i+1 : j] { + if !unicode.IsDigit(r) { + return false + } + } + return true +} + +// foreignKeys populates the tables foreign keys using the information_schema tables +func (d *Postgres) foreignKeys(ctx context.Context, tx dialect.Tx, tables []*Table) error { + var tableLookup = make(map[string]*Table) + for _, t := range tables { + tableLookup[t.Name] = t + } + for _, t := range tables { + rows := &sql.Rows{} + query := fmt.Sprintf(fkQuery, t.Name) + if err := tx.Query(ctx, query, []any{}, rows); err != nil { + return fmt.Errorf("querying foreign keys for table %s: %w", t.Name, err) + } + defer rows.Close() + var tableFksLookup = make(map[string]*ForeignKey) + for rows.Next() { + var tableSchema, constraintName, tableName, columnName, refTableSchema, refTableName, refColumnName string + if err := rows.Scan(&tableSchema, &constraintName, &tableName, &columnName, &refTableSchema, &refTableName, &refColumnName); err != nil { + return fmt.Errorf("scanning index description: %w", err) + } + refTable := tableLookup[refTableName] + if refTable == nil { + return fmt.Errorf("could not find table: %s", refTableName) + } + column, ok := t.column(columnName) + if !ok { + return fmt.Errorf("could not find column: %s on table: %s", columnName, tableName) + } + refColumn, ok := refTable.column(refColumnName) + if !ok { + return fmt.Errorf("could not find ref column: %s on ref table: %s", refTableName, refColumnName) + } + if fk, ok := tableFksLookup[constraintName]; ok { + if _, ok := fk.column(columnName); !ok { + fk.Columns = append(fk.Columns, column) + } + if _, ok := fk.refColumn(refColumnName); !ok { + fk.RefColumns = append(fk.RefColumns, refColumn) + } + } else { + newFk := &ForeignKey{ + Symbol: constraintName, + Columns: []*Column{column}, + RefTable: refTable, + RefColumns: []*Column{refColumn}, + } + tableFksLookup[constraintName] = newFk + t.AddForeignKey(newFk) + } + } + if err := rows.Close(); err != nil { + return err + } + if err := 
rows.Err(); err != nil { + return err + } + } + return nil +} + +// fkQuery holds a query format for retrieving +// foreign keys of the current schema. +const fkQuery = ` +SELECT tc.table_schema, + tc.constraint_name, + tc.table_name, + kcu.column_name, + ccu.table_schema AS foreign_table_schema, + ccu.table_name AS foreign_table_name, + ccu.column_name AS foreign_column_name +FROM information_schema.table_constraints AS tc + JOIN information_schema.key_column_usage AS kcu + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + JOIN information_schema.constraint_column_usage AS ccu + ON ccu.constraint_name = tc.constraint_name + AND ccu.table_schema = tc.table_schema +WHERE tc.constraint_type = 'FOREIGN KEY' + AND tc.table_name = '%s' +order by constraint_name, kcu.ordinal_position; +` + +// Atlas integration. + +func (d *Postgres) atOpen(conn dialect.ExecQuerier) (migrate.Driver, error) { + return postgres.Open(&db{ExecQuerier: conn}) +} + +func (d *Postgres) atTable(t1 *Table, t2 *schema.Table) { + if t1.Annotation != nil { + setAtChecks(t1, t2) + } +} + +func (d *Postgres) supportsDefault(*Column) bool { + // PostgreSQL supports default values for all standard types. + return true +} + +func (d *Postgres) atTypeC(c1 *Column, c2 *schema.Column) error { + if c1.SchemaType != nil && c1.SchemaType[dialect.Postgres] != "" { + t, err := postgres.ParseType(strings.ToLower(c1.SchemaType[dialect.Postgres])) + if err != nil { + return err + } + c2.Type.Type = t + if s, ok := t.(*postgres.SerialType); c1.foreign != nil && ok { + c2.Type.Type = s.IntegerType() + } + return nil + } + var t schema.Type + switch c1.Type { + case field.TypeBool: + t = &schema.BoolType{T: postgres.TypeBoolean} + case field.TypeUint8, field.TypeInt8, field.TypeInt16, field.TypeUint16: + t = &schema.IntegerType{T: postgres.TypeSmallInt} + case field.TypeInt32, field.TypeUint32: + t = &schema.IntegerType{T: postgres.TypeInt} + case field.TypeInt, field.TypeUint, field.TypeInt64, field.TypeUint64: + t = &schema.IntegerType{T: postgres.TypeBigInt} + case field.TypeFloat32: + t = &schema.FloatType{T: c1.scanTypeOr(postgres.TypeReal)} + case field.TypeFloat64: + t = &schema.FloatType{T: c1.scanTypeOr(postgres.TypeDouble)} + case field.TypeBytes: + t = &schema.BinaryType{T: postgres.TypeBytea} + case field.TypeUUID: + t = &postgres.UUIDType{T: postgres.TypeUUID} + case field.TypeJSON: + t = &schema.JSONType{T: postgres.TypeJSONB} + case field.TypeString: + t = &schema.StringType{T: postgres.TypeVarChar} + if c1.Size > maxCharSize { + t = &schema.StringType{T: postgres.TypeText} + } + case field.TypeTime: + t = &schema.TimeType{T: c1.scanTypeOr(postgres.TypeTimestampWTZ)} + case field.TypeEnum: + // Although atlas supports enum types, we keep backwards compatibility + // with previous versions of ent and use varchar (see cType). + t = &schema.StringType{T: postgres.TypeVarChar} + case field.TypeOther: + t = &schema.UnsupportedType{T: c1.typ} + default: + t, err := postgres.ParseType(strings.ToLower(c1.typ)) + if err != nil { + return err + } + c2.Type.Type = t + } + c2.Type.Type = t + return nil +} + +func (d *Postgres) atUniqueC(t1 *Table, c1 *Column, t2 *schema.Table, c2 *schema.Column) { + // For UNIQUE columns, PostgreSQL creates an implicit index named + // "
__key". + for _, idx := range t1.Indexes { + // Index also defined explicitly, and will be added in atIndexes. + if idx.Unique && d.atImplicitIndexName(idx, t1, c1) { + return + } + } + t2.AddIndexes(schema.NewUniqueIndex(fmt.Sprintf("%s_%s_key", t1.Name, c1.Name)).AddColumns(c2)) +} + +func (d *Postgres) atImplicitIndexName(idx *Index, t1 *Table, c1 *Column) bool { + p := fmt.Sprintf("%s_%s_key", t1.Name, c1.Name) + if idx.Name == p { + return true + } + i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, p), 10, 64) + return err == nil && i > 0 +} + +func (d *Postgres) atIncrementC(t *schema.Table, c *schema.Column) { + // Skip marking this column as an identity in case it is + // serial type or a default was already defined for it. + if _, ok := c.Type.Type.(*postgres.SerialType); ok || c.Default != nil { + t.Attrs = removeAttr(t.Attrs, reflect.TypeOf(&postgres.Identity{})) + return + } + id := &postgres.Identity{} + for _, a := range t.Attrs { + if a, ok := a.(*postgres.Identity); ok { + id = a + } + } + c.AddAttrs(id) +} + +func (d *Postgres) atIncrementT(t *schema.Table, v int64) { + t.AddAttrs(&postgres.Identity{Sequence: &postgres.Sequence{Start: v}}) +} + +// indexOpClass returns a map holding the operator-class mapping if exists. +func indexOpClass(idx *Index) map[string]string { + opc := make(map[string]string) + if idx.Annotation == nil { + return opc + } + // If operator-class (without a name) was defined on + // the annotation, map it to the single column index. + if idx.Annotation.OpClass != "" && len(idx.Columns) == 1 { + opc[idx.Columns[0].Name] = idx.Annotation.OpClass + } + for column, op := range idx.Annotation.OpClassColumns { + opc[column] = op + } + return opc +} + +func (d *Postgres) atIndex(idx1 *Index, t2 *schema.Table, idx2 *schema.Index) error { + opc := indexOpClass(idx1) + for _, c1 := range idx1.Columns { + c2, ok := t2.Column(c1.Name) + if !ok { + return fmt.Errorf("unexpected index %q column: %q", idx1.Name, c1.Name) + } + part := &schema.IndexPart{C: c2} + if v, ok := opc[c1.Name]; ok { + var op postgres.IndexOpClass + if err := op.UnmarshalText([]byte(v)); err != nil { + return fmt.Errorf("unmarshalling operator-class %q for column %q: %v", v, c1.Name, err) + } + part.Attrs = append(part.Attrs, &op) + } + idx2.AddParts(part) + } + if t, ok := indexType(idx1, dialect.Postgres); ok { + idx2.AddAttrs(&postgres.IndexType{T: t}) + } + if ant, supportsInclude := idx1.Annotation, compareVersions(d.version, "11.0.0") >= 0; ant != nil && len(ant.IncludeColumns) > 0 && supportsInclude { + columns := make([]*schema.Column, len(ant.IncludeColumns)) + for i, ic := range ant.IncludeColumns { + c, ok := t2.Column(ic) + if !ok { + return fmt.Errorf("include column %q was not found for index %q", ic, idx1.Name) + } + columns[i] = c + } + idx2.AddAttrs(&postgres.IndexInclude{Columns: columns}) + } + if idx1.Annotation != nil && idx1.Annotation.Where != "" { + idx2.AddAttrs(&postgres.IndexPredicate{P: idx1.Annotation.Where}) + } + return nil +} + +func (Postgres) atTypeRangeSQL(ts ...string) string { + for i := range ts { + ts[i] = fmt.Sprintf("('%s')", ts[i]) + } + return fmt.Sprintf(`INSERT INTO "%s" ("type") VALUES %s`, TypeTable, strings.Join(ts, ", ")) +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/schema.go b/vendor/entgo.io/ent/dialect/sql/schema/schema.go new file mode 100644 index 00000000..2b613a47 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/schema.go @@ -0,0 +1,690 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. 
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+// Package schema contains all schema migration logic for SQL dialects.
+package schema
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "entgo.io/ent/dialect/entsql"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/schema/field"
+)
+
+const (
+ // DefaultStringLen describes the default length for string/varchar types.
+ DefaultStringLen int64 = 255
+ // Null is the string representation of NULL in SQL.
+ Null = "NULL"
+ // PrimaryKey is the string representation of primary keys in SQL.
+ PrimaryKey = "PRI"
+ // UniqueKey is the string representation of unique keys in SQL.
+ UniqueKey = "UNI"
+)
+
+// Table schema definition for SQL dialects.
+type Table struct {
+ Name string
+ Columns []*Column
+ columns map[string]*Column
+ Indexes []*Index
+ PrimaryKey []*Column
+ ForeignKeys []*ForeignKey
+ Annotation *entsql.Annotation
+ Comment string
+}
+
+// NewTable returns a new table with the given name.
+func NewTable(name string) *Table {
+ return &Table{
+ Name: name,
+ columns: make(map[string]*Column),
+ }
+}
+
+// SetComment sets the table comment.
+func (t *Table) SetComment(c string) *Table {
+ t.Comment = c
+ return t
+}
+
+// AddPrimary adds a new primary key to the table.
+func (t *Table) AddPrimary(c *Column) *Table {
+ c.Key = PrimaryKey
+ t.AddColumn(c)
+ t.PrimaryKey = append(t.PrimaryKey, c)
+ return t
+}
+
+// AddForeignKey adds a foreign key to the table.
+func (t *Table) AddForeignKey(fk *ForeignKey) *Table {
+ t.ForeignKeys = append(t.ForeignKeys, fk)
+ return t
+}
+
+// AddColumn adds a new column to the table.
+func (t *Table) AddColumn(c *Column) *Table {
+ t.columns[c.Name] = c
+ t.Columns = append(t.Columns, c)
+ return t
+}
+
+// HasColumn reports if the table contains a column with the given name.
+func (t *Table) HasColumn(name string) bool {
+ _, ok := t.Column(name)
+ return ok
+}
+
+// Column returns the column with the given name, if it exists.
+func (t *Table) Column(name string) (*Column, bool) {
+ if c, ok := t.columns[name]; ok {
+ return c, true
+ }
+ // In case the column was added
+ // directly to the Columns field.
+ for _, c := range t.Columns {
+ if c.Name == name {
+ return c, true
+ }
+ }
+ return nil, false
+}
+
+// SetAnnotation sets the entsql.Annotation on the table.
+func (t *Table) SetAnnotation(ant *entsql.Annotation) *Table {
+ t.Annotation = ant
+ return t
+}
+
+// AddIndex creates and adds a new index to the table from the given options.
+func (t *Table) AddIndex(name string, unique bool, columns []string) *Table {
+ return t.addIndex(&Index{
+ Name: name,
+ Unique: unique,
+ columns: columns,
+ Columns: make([]*Column, 0, len(columns)),
+ })
+}
+
+// addIndex adds the given index to the table and links it to its columns.
+func (t *Table) addIndex(idx *Index) *Table {
+ for _, name := range idx.columns {
+ c, ok := t.columns[name]
+ if ok {
+ c.indexes.append(idx)
+ idx.Columns = append(idx.Columns, c)
+ }
+ }
+ t.Indexes = append(t.Indexes, idx)
+ return t
+}
+
+// column returns a table column by its name.
+// It is faster than a map lookup in most cases.
+func (t *Table) column(name string) (*Column, bool) {
+ for _, c := range t.Columns {
+ if c.Name == name {
+ return c, true
+ }
+ }
+ return nil, false
+}
+
+// Index returns a table index by its exact name.
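+// It matches only when the resolved index carries exactly the given name;
+// see the unexported index method below for the fuzzier lookup rules.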
+func (t *Table) Index(name string) (*Index, bool) { + idx, ok := t.index(name) + if ok && idx.Name == name { + return idx, ok + } + return nil, false +} + +// index returns a table index by its name. +func (t *Table) index(name string) (*Index, bool) { + for _, idx := range t.Indexes { + if name == idx.Name || name == idx.realname { + return idx, true + } + // Same as below, there are cases where the index name + // is unknown (created automatically on column constraint). + if len(idx.Columns) == 1 && idx.Columns[0].Name == name { + return idx, true + } + } + // If it is an "implicit index" (unique constraint on + // table creation) and it wasn't loaded in table scanning. + c, ok := t.column(name) + if !ok { + // Postgres naming convention for unique constraint (
<table>_<column>_key).
+ name = strings.TrimPrefix(name, t.Name+"_")
+ name = strings.TrimSuffix(name, "_key")
+ c, ok = t.column(name)
+ }
+ if ok && c.Unique {
+ return &Index{Name: name, Unique: c.Unique, Columns: []*Column{c}, columns: []string{c.Name}}, true
+ }
+ return nil, false
+}
+
+// hasIndex reports if the table has at least one index that matches the given names.
+func (t *Table) hasIndex(names ...string) bool {
+ for i := range names {
+ if names[i] == "" {
+ continue
+ }
+ if _, ok := t.index(names[i]); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// fk returns a table foreign-key by its symbol.
+// It is faster than a map lookup in most cases.
+func (t *Table) fk(symbol string) (*ForeignKey, bool) {
+ for _, fk := range t.ForeignKeys {
+ if fk.Symbol == symbol {
+ return fk, true
+ }
+ }
+ return nil, false
+}
+
+// CopyTables returns a deep-copy of the given tables. This utility function is
+// useful for copying the generated schema tables (i.e. migrate.Tables) before
+// running schema migration when there is a need to execute multiple migrations
+// concurrently, e.g. running parallel unit-tests using the generated enttest package.
+func CopyTables(tables []*Table) ([]*Table, error) {
+ var (
+ copyT = make([]*Table, len(tables))
+ byName = make(map[string]*Table)
+ )
+ for i, t := range tables {
+ copyT[i] = &Table{
+ Name: t.Name,
+ Columns: make([]*Column, len(t.Columns)),
+ Indexes: make([]*Index, len(t.Indexes)),
+ ForeignKeys: make([]*ForeignKey, len(t.ForeignKeys)),
+ }
+ for j, c := range t.Columns {
+ cc := *c
+ // SchemaType and Enums are read-only fields.
+ cc.indexes = nil
+ cc.foreign = nil
+ copyT[i].Columns[j] = &cc
+ }
+ if at := t.Annotation; at != nil {
+ cat := *at
+ copyT[i].Annotation = &cat
+ }
+ byName[t.Name] = copyT[i]
+ }
+ for i, t := range tables {
+ ct := copyT[i]
+ for _, c := range t.PrimaryKey {
+ cc, ok := ct.column(c.Name)
+ if !ok {
+ return nil, fmt.Errorf("sql/schema: missing primary key column %q", c.Name)
+ }
+ ct.PrimaryKey = append(ct.PrimaryKey, cc)
+ }
+ for j, idx := range t.Indexes {
+ cidx := &Index{
+ Name: idx.Name,
+ Unique: idx.Unique,
+ Columns: make([]*Column, len(idx.Columns)),
+ }
+ if at := idx.Annotation; at != nil {
+ cat := *at
+ cidx.Annotation = &cat
+ }
+ for k, c := range idx.Columns {
+ cc, ok := ct.column(c.Name)
+ if !ok {
+ return nil, fmt.Errorf("sql/schema: missing index column %q", c.Name)
+ }
+ cidx.Columns[k] = cc
+ }
+ ct.Indexes[j] = cidx
+ }
+ for j, fk := range t.ForeignKeys {
+ cfk := &ForeignKey{
+ Symbol: fk.Symbol,
+ OnUpdate: fk.OnUpdate,
+ OnDelete: fk.OnDelete,
+ Columns: make([]*Column, len(fk.Columns)),
+ RefColumns: make([]*Column, len(fk.RefColumns)),
+ }
+ for k, c := range fk.Columns {
+ cc, ok := ct.column(c.Name)
+ if !ok {
+ return nil, fmt.Errorf("sql/schema: missing foreign-key column %q", c.Name)
+ }
+ cfk.Columns[k] = cc
+ }
+ cref, ok := byName[fk.RefTable.Name]
+ if !ok {
+ return nil, fmt.Errorf("sql/schema: missing foreign-key ref-table %q", fk.RefTable.Name)
+ }
+ cfk.RefTable = cref
+ for k, c := range fk.RefColumns {
+ cc, ok := cref.column(c.Name)
+ if !ok {
+ return nil, fmt.Errorf("sql/schema: missing foreign-key ref-column %q", c.Name)
+ }
+ cfk.RefColumns[k] = cc
+ }
+ ct.ForeignKeys[j] = cfk
+ }
+ }
+ return copyT, nil
+}
+
+// Column schema definition for SQL dialects.
+type Column struct {
+ Name string // column name.
+ Type field.Type // column type.
+ SchemaType map[string]string // optional schema type per dialect.
+ Attr string // extra attributes.
+ Size int64 // max size parameter for string, blob, etc.
+ Key string // key definition (PRI, UNI or MUL).
+ Unique bool // column with unique constraint.
+ Increment bool // auto increment attribute.
+ Nullable bool // null or not null attribute.
+ Default any // default value.
+ Enums []string // enum values.
+ Collation string // collation type (utf8mb4_unicode_ci, utf8mb4_general_ci).
+ typ string // raw column type (used for Rows.Scan).
+ indexes Indexes // linked indexes.
+ foreign *ForeignKey // linked foreign-key.
+ Comment string // optional column comment.
+}
+
+// Expr represents a raw expression. It is used to distinguish between
+// literal values and raw expressions when defining default values.
+type Expr string
+
+// UniqueKey returns a boolean indicating if this column is a unique key.
+// Used by the migration tool when parsing the `DESCRIBE TABLE` output Go objects.
+func (c *Column) UniqueKey() bool { return c.Key == UniqueKey }
+
+// PrimaryKey returns a boolean indicating if this column is one of the primary key columns.
+// Used by the migration tool when parsing the `DESCRIBE TABLE` output Go objects.
+func (c *Column) PrimaryKey() bool { return c.Key == PrimaryKey }
+
+// ConvertibleTo reports whether a column can be converted to the new column without altering its data.
+func (c *Column) ConvertibleTo(d *Column) bool {
+ switch {
+ case c.Type == d.Type:
+ if c.Size != 0 && d.Size != 0 {
+ // Types match and have a size constraint.
+ return c.Size <= d.Size
+ }
+ return true
+ case c.IntType() && d.IntType() || c.UintType() && d.UintType():
+ return c.Type <= d.Type
+ case c.UintType() && d.IntType():
+ // uintX cannot be converted to intY, when X > Y.
+ return c.Type-field.TypeUint8 <= d.Type-field.TypeInt8
+ case c.Type == field.TypeString && d.Type == field.TypeEnum ||
+ c.Type == field.TypeEnum && d.Type == field.TypeString:
+ return true
+ case c.Type.Integer() && d.Type == field.TypeString:
+ return true
+ }
+ return c.FloatType() && d.FloatType()
+}
+
+// IntType reports whether the column is an int type (int8 ... int64).
+func (c Column) IntType() bool { return c.Type >= field.TypeInt8 && c.Type <= field.TypeInt64 }
+
+// UintType reports whether the column is a uint type (uint8 ... uint64).
+func (c Column) UintType() bool { return c.Type >= field.TypeUint8 && c.Type <= field.TypeUint64 }
+
+// FloatType reports whether the column is a float type (float32, float64).
+func (c Column) FloatType() bool { return c.Type == field.TypeFloat32 || c.Type == field.TypeFloat64 }
+
+// ScanDefault scans the default value string to its interface type.
+func (c *Column) ScanDefault(value string) error {
+ switch {
+ case strings.ToUpper(value) == Null: // ignore.
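+ // For example, a raw default of "42" on an int column is stored as
+ // int64(42), and a string default is stored as-is.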
+ case c.IntType(): + v := &sql.NullInt64{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning int value for column %q: %w", c.Name, err) + } + c.Default = v.Int64 + case c.UintType(): + v := &sql.NullInt64{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning uint value for column %q: %w", c.Name, err) + } + c.Default = uint64(v.Int64) + case c.FloatType(): + v := &sql.NullFloat64{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning float value for column %q: %w", c.Name, err) + } + c.Default = v.Float64 + case c.Type == field.TypeBool: + v := &sql.NullBool{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning bool value for column %q: %w", c.Name, err) + } + c.Default = v.Bool + case c.Type == field.TypeString || c.Type == field.TypeEnum: + v := &sql.NullString{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning string value for column %q: %w", c.Name, err) + } + c.Default = v.String + case c.Type == field.TypeJSON: + v := &sql.NullString{} + if err := v.Scan(value); err != nil { + return fmt.Errorf("scanning json value for column %q: %w", c.Name, err) + } + c.Default = v.String + case c.Type == field.TypeBytes: + c.Default = []byte(value) + case c.Type == field.TypeUUID: + // skip function + if !strings.Contains(value, "()") { + c.Default = value + } + default: + return fmt.Errorf("unsupported default type: %v default to %q", c.Type, value) + } + return nil +} + +// defaultValue adds the `DEFAULT` attribute to the column. +// Note that, in SQLite if a NOT NULL constraint is specified, +// then the column must have a default value which not NULL. +func (c *Column) defaultValue(b *sql.ColumnBuilder) { + if c.Default == nil || !c.supportDefault() { + return + } + // Has default and the database supports adding this default. + attr := fmt.Sprint(c.Default) + switch v := c.Default.(type) { + case bool: + attr = strconv.FormatBool(v) + case string: + if t := c.Type; t != field.TypeUUID && t != field.TypeTime { + // Escape single quote by replacing each with 2. + attr = fmt.Sprintf("'%s'", strings.ReplaceAll(v, "'", "''")) + } + } + b.Attr("DEFAULT " + attr) +} + +// supportDefault reports if the column type supports default value. +func (c Column) supportDefault() bool { + switch t := c.Type; t { + case field.TypeString, field.TypeEnum: + return c.Size < 1<<16 // not a text. + case field.TypeBool, field.TypeTime, field.TypeUUID: + return true + default: + return t.Numeric() + } +} + +// unique adds the `UNIQUE` attribute if the column is a unique type. +// it is exist in a different function to share the common declaration +// between the two dialects. +func (c *Column) unique(b *sql.ColumnBuilder) { + if c.Unique { + b.Attr("UNIQUE") + } +} + +// nullable adds the `NULL`/`NOT NULL` attribute to the column if it exists in +// a different function to share the common declaration between the two dialects. +func (c *Column) nullable(b *sql.ColumnBuilder) { + attr := Null + if !c.Nullable { + attr = "NOT " + attr + } + b.Attr(attr) +} + +// scanTypeOr returns the scanning type or the given value. +func (c *Column) scanTypeOr(t string) string { + if c.typ != "" { + return strings.ToLower(c.typ) + } + return t +} + +// ForeignKey definition for creation. +type ForeignKey struct { + Symbol string // foreign-key name. Generated if empty. + Columns []*Column // table column + RefTable *Table // referenced table. + RefColumns []*Column // referenced columns. + OnUpdate ReferenceOption // action on update. 
+ OnDelete ReferenceOption // action on delete. +} + +func (fk ForeignKey) column(name string) (*Column, bool) { + for _, c := range fk.Columns { + if c.Name == name { + return c, true + } + } + return nil, false +} + +func (fk ForeignKey) refColumn(name string) (*Column, bool) { + for _, c := range fk.RefColumns { + if c.Name == name { + return c, true + } + } + return nil, false +} + +// DSL returns a default DSL query for a foreign-key. +func (fk ForeignKey) DSL() *sql.ForeignKeyBuilder { + cols := make([]string, len(fk.Columns)) + refs := make([]string, len(fk.RefColumns)) + for i, c := range fk.Columns { + cols[i] = c.Name + } + for i, c := range fk.RefColumns { + refs[i] = c.Name + } + dsl := sql.ForeignKey().Symbol(fk.Symbol). + Columns(cols...). + Reference(sql.Reference().Table(fk.RefTable.Name).Columns(refs...)) + if action := string(fk.OnDelete); action != "" { + dsl.OnDelete(action) + } + if action := string(fk.OnUpdate); action != "" { + dsl.OnUpdate(action) + } + return dsl +} + +// ReferenceOption for constraint actions. +type ReferenceOption string + +// Reference options. +const ( + NoAction ReferenceOption = "NO ACTION" + Restrict ReferenceOption = "RESTRICT" + Cascade ReferenceOption = "CASCADE" + SetNull ReferenceOption = "SET NULL" + SetDefault ReferenceOption = "SET DEFAULT" +) + +// ConstName returns the constant name of a reference option. It's used by entc for printing the constant name in templates. +func (r ReferenceOption) ConstName() string { + return strings.ReplaceAll(strings.Title(strings.ToLower(string(r))), " ", "") +} + +// Index definition for table index. +type Index struct { + Name string // index name. + Unique bool // uniqueness. + Columns []*Column // actual table columns. + Annotation *entsql.IndexAnnotation // index annotation. + columns []string // columns loaded from query scan. + primary bool // primary key index. + realname string // real name in the database (Postgres only). +} + +// Builder returns the query builder for index creation. The DSL is identical in all dialects. +func (i *Index) Builder(table string) *sql.IndexBuilder { + idx := sql.CreateIndex(i.Name).Table(table) + if i.Unique { + idx.Unique() + } + for _, c := range i.Columns { + idx.Column(c.Name) + } + return idx +} + +// DropBuilder returns the query builder for the drop index. +func (i *Index) DropBuilder(table string) *sql.DropIndexBuilder { + idx := sql.DropIndex(i.Name).Table(table) + return idx +} + +// sameAs reports if the index has the same properties +// as the given index (except the name). +func (i *Index) sameAs(idx *Index) bool { + if i.Unique != idx.Unique || len(i.Columns) != len(idx.Columns) { + return false + } + for j, c := range i.Columns { + if c.Name != idx.Columns[j].Name { + return false + } + } + return true +} + +// columnNames returns the names of the columns of the index. +func (i *Index) columnNames() []string { + if len(i.columns) > 0 { + return i.columns + } + columns := make([]string, 0, len(i.Columns)) + for _, c := range i.Columns { + columns = append(columns, c.Name) + } + return columns +} + +// Indexes used for scanning all sql.Rows into a list of indexes, because +// multiple sql rows can represent the same index (multi-columns indexes). +type Indexes []*Index + +// append wraps the basic `append` function by filtering duplicates indexes. 
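+// For example, two scanned rows that describe the same multi-column index
+// are collapsed into a single *Index entry.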
+func (i *Indexes) append(idx1 *Index) { + for _, idx2 := range *i { + if idx2.Name == idx1.Name { + return + } + } + *i = append(*i, idx1) +} + +// compareVersions returns an integer comparing the 2 versions. +func compareVersions(v1, v2 string) int { + pv1, ok1 := parseVersion(v1) + pv2, ok2 := parseVersion(v2) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return 1 + } + if v := compare(pv1.major, pv2.major); v != 0 { + return v + } + if v := compare(pv1.minor, pv2.minor); v != 0 { + return v + } + return compare(pv1.patch, pv2.patch) +} + +// version represents a parsed MySQL version. +type version struct { + major int + minor int + patch int +} + +// parseVersion returns an integer comparing the 2 versions. +func parseVersion(v string) (*version, bool) { + parts := strings.Split(v, ".") + if len(parts) == 0 { + return nil, false + } + var ( + err error + ver = &version{} + ) + for i, e := range []*int{&ver.major, &ver.minor, &ver.patch} { + if i == len(parts) { + break + } + if *e, err = strconv.Atoi(strings.Split(parts[i], "-")[0]); err != nil { + return nil, false + } + } + return ver, true +} + +func compare(v1, v2 int) int { + if v1 == v2 { + return 0 + } + if v1 < v2 { + return -1 + } + return 1 +} + +// addChecks appends the CHECK clauses from the entsql.Annotation. +func addChecks(t *sql.TableBuilder, ant *entsql.Annotation) { + if check := ant.Check; check != "" { + t.Checks(func(b *sql.Builder) { + b.WriteString("CHECK " + checkExpr(check)) + }) + } + if checks := ant.Checks; len(ant.Checks) > 0 { + names := make([]string, 0, len(checks)) + for name := range checks { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + name := name + t.Checks(func(b *sql.Builder) { + b.WriteString("CONSTRAINT ").Ident(name).WriteString(" CHECK " + checkExpr(checks[name])) + }) + } + } +} + +// checkExpr formats the CHECK expression. +func checkExpr(expr string) string { + expr = strings.TrimSpace(expr) + if !strings.HasPrefix(expr, "(") && !strings.HasSuffix(expr, ")") { + expr = "(" + expr + ")" + } + return expr +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/sqlite.go b/vendor/entgo.io/ent/dialect/sql/schema/sqlite.go new file mode 100644 index 00000000..5e315cd0 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/sqlite.go @@ -0,0 +1,528 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +import ( + "context" + stdsql "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" + + "ariga.io/atlas/sql/migrate" + "ariga.io/atlas/sql/schema" + "ariga.io/atlas/sql/sqlite" +) + +type ( + // SQLite is an SQLite migration driver. + SQLite struct { + dialect.Driver + WithForeignKeys bool + } + // SQLiteTx implements dialect.Tx. + SQLiteTx struct { + dialect.Tx + commit func() error // Override Commit to toggle foreign keys back on after Commit. + rollback func() error // Override Rollback to toggle foreign keys back on after Rollback. + } +) + +// Tx implements opens a transaction. 
+func (d *SQLite) Tx(ctx context.Context) (dialect.Tx, error) { + db := &db{d} + if _, err := db.ExecContext(ctx, "PRAGMA foreign_keys = off"); err != nil { + return nil, fmt.Errorf("sqlite: set 'foreign_keys = off': %w", err) + } + t, err := d.Driver.Tx(ctx) + if err != nil { + return nil, err + } + tx := &tx{t} + cm, err := sqlite.CommitFunc(ctx, db, tx, true) + if err != nil { + return nil, err + } + return &SQLiteTx{Tx: t, commit: cm, rollback: sqlite.RollbackFunc(ctx, db, tx, true)}, nil +} + +// Commit ensures foreign keys are toggled back on after commit. +func (tx *SQLiteTx) Commit() error { + return tx.commit() +} + +// Rollback ensures foreign keys are toggled back on after rollback. +func (tx *SQLiteTx) Rollback() error { + return tx.rollback() +} + +// init makes sure that foreign_keys support is enabled. +func (d *SQLite) init(ctx context.Context) error { + on, err := exist(ctx, d, "PRAGMA foreign_keys") + if err != nil { + return fmt.Errorf("sqlite: check foreign_keys pragma: %w", err) + } + if !on { + // foreign_keys pragma is off, either enable it by execute "PRAGMA foreign_keys=ON" + // or add the following parameter in the connection string "_fk=1". + return fmt.Errorf("sqlite: foreign_keys pragma is off: missing %q in the connection string", "_fk=1") + } + return nil +} + +func (d *SQLite) tableExist(ctx context.Context, conn dialect.ExecQuerier, name string) (bool, error) { + query, args := sql.Select().Count(). + From(sql.Table("sqlite_master")). + Where(sql.And( + sql.EQ("type", "table"), + sql.EQ("name", name), + )). + Query() + return exist(ctx, conn, query, args...) +} + +// setRange sets the start value of table PK. +// SQLite tracks the AUTOINCREMENT in the "sqlite_sequence" table that is created and initialized automatically +// whenever a table that contains an AUTOINCREMENT column is created. However, it populates to it a rows (for tables) +// only after the first insertion. Therefore, we check. If a record (for the given table) already exists in the "sqlite_sequence" +// table, we updated it. Otherwise, we insert a new value. +func (d *SQLite) setRange(ctx context.Context, conn dialect.ExecQuerier, t *Table, value int64) error { + query, args := sql.Select().Count(). + From(sql.Table("sqlite_sequence")). + Where(sql.EQ("name", t.Name)). + Query() + exists, err := exist(ctx, conn, query, args...) + switch { + case err != nil: + return err + case exists: + query, args = sql.Update("sqlite_sequence").Set("seq", value).Where(sql.EQ("name", t.Name)).Query() + default: // !exists + query, args = sql.Insert("sqlite_sequence").Columns("name", "seq").Values(t.Name, value).Query() + } + return conn.Exec(ctx, query, args, nil) +} + +func (d *SQLite) tBuilder(t *Table) *sql.TableBuilder { + b := sql.CreateTable(t.Name) + for _, c := range t.Columns { + b.Column(d.addColumn(c)) + } + if t.Annotation != nil { + addChecks(b, t.Annotation) + } + // Unlike in MySQL, we're not able to add foreign-key constraints to table + // after it was created, and adding them to the `CREATE TABLE` statement is + // not always valid (because circular foreign-keys situation is possible). + // We stay consistent by not using constraints at all, and just defining the + // foreign keys in the `CREATE TABLE` statement. + if d.WithForeignKeys { + for _, fk := range t.ForeignKeys { + b.ForeignKeys(fk.DSL()) + } + } + // If it's an ID based primary key with autoincrement, we add + // the `PRIMARY KEY` clause to the column declaration. Otherwise, + // we append it to the constraint clause. 
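+ // Illustrative output for the two cases (attribute order approximate,
+ // quoting elided):
+ //
+ //	CREATE TABLE users (id integer PRIMARY KEY AUTOINCREMENT NOT NULL, ...)
+ //	CREATE TABLE user_groups (..., PRIMARY KEY(user_id, group_id))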
+ if len(t.PrimaryKey) == 1 && t.PrimaryKey[0].Increment { + return b + } + for _, pk := range t.PrimaryKey { + b.PrimaryKey(pk.Name) + } + return b +} + +// cType returns the SQLite string type for the given column. +func (*SQLite) cType(c *Column) (t string) { + if c.SchemaType != nil && c.SchemaType[dialect.SQLite] != "" { + return c.SchemaType[dialect.SQLite] + } + switch c.Type { + case field.TypeBool: + t = "bool" + case field.TypeInt8, field.TypeUint8, field.TypeInt16, field.TypeUint16, field.TypeInt32, + field.TypeUint32, field.TypeUint, field.TypeInt, field.TypeInt64, field.TypeUint64: + t = "integer" + case field.TypeBytes: + t = "blob" + case field.TypeString, field.TypeEnum: + // SQLite does not impose any length restrictions on + // the length of strings, BLOBs or numeric values. + t = fmt.Sprintf("varchar(%d)", DefaultStringLen) + case field.TypeFloat32, field.TypeFloat64: + t = "real" + case field.TypeTime: + t = "datetime" + case field.TypeJSON: + t = "json" + case field.TypeUUID: + t = "uuid" + case field.TypeOther: + t = c.typ + default: + panic(fmt.Sprintf("unsupported type %q for column %q", c.Type, c.Name)) + } + return t +} + +// addColumn returns the DSL query for adding the given column to a table. +func (d *SQLite) addColumn(c *Column) *sql.ColumnBuilder { + b := sql.Column(c.Name).Type(d.cType(c)).Attr(c.Attr) + c.unique(b) + if c.PrimaryKey() && c.Increment { + b.Attr("PRIMARY KEY AUTOINCREMENT") + } + c.nullable(b) + c.defaultValue(b) + return b +} + +// addIndex returns the query for adding an index to SQLite. +func (d *SQLite) addIndex(i *Index, table string) *sql.IndexBuilder { + return i.Builder(table).IfNotExists() +} + +// dropIndex drops a SQLite index. +func (d *SQLite) dropIndex(ctx context.Context, tx dialect.Tx, idx *Index, table string) error { + query, args := idx.DropBuilder("").Query() + return tx.Exec(ctx, query, args, nil) +} + +// fkExist returns always true to disable foreign-keys creation after the table was created. +func (d *SQLite) fkExist(context.Context, dialect.Tx, string) (bool, error) { return true, nil } + +// table returns always error to indicate that SQLite dialect doesn't support incremental migration. +func (d *SQLite) table(ctx context.Context, tx dialect.Tx, name string) (*Table, error) { + rows := &sql.Rows{} + query, args := sql.Select("name", "type", "notnull", "dflt_value", "pk"). + From(sql.Table(fmt.Sprintf("pragma_table_info('%s')", name)).Unquote()). + OrderBy("pk"). + Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("sqlite: reading table description %w", err) + } + // Call Close in cases of failures (Close is idempotent). + defer rows.Close() + t := NewTable(name) + for rows.Next() { + c := &Column{} + if err := d.scanColumn(c, rows); err != nil { + return nil, fmt.Errorf("sqlite: %w", err) + } + if c.PrimaryKey() { + t.PrimaryKey = append(t.PrimaryKey, c) + } + t.AddColumn(c) + } + if err := rows.Err(); err != nil { + return nil, err + } + if err := rows.Close(); err != nil { + return nil, fmt.Errorf("sqlite: closing rows %w", err) + } + indexes, err := d.indexes(ctx, tx, name) + if err != nil { + return nil, err + } + // Add and link indexes to table columns. 
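+ // A unique single-column index also marks its column with the UNI key
+ // and the Unique flag before being added, so implicit constraint
+ // indexes survive a round-trip through inspection.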
+ for _, idx := range indexes { + switch { + case idx.primary: + case idx.Unique && len(idx.columns) == 1: + name := idx.columns[0] + c, ok := t.column(name) + if !ok { + return nil, fmt.Errorf("index %q column %q was not found in table %q", idx.Name, name, t.Name) + } + c.Key = UniqueKey + c.Unique = true + fallthrough + default: + t.addIndex(idx) + } + } + return t, nil +} + +// table loads the table indexes from the database. +func (d *SQLite) indexes(ctx context.Context, tx dialect.Tx, name string) (Indexes, error) { + rows := &sql.Rows{} + query, args := sql.Select("name", "unique", "origin"). + From(sql.Table(fmt.Sprintf("pragma_index_list('%s')", name)).Unquote()). + Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("reading table indexes %w", err) + } + defer rows.Close() + var idx Indexes + for rows.Next() { + i := &Index{} + origin := sql.NullString{} + if err := rows.Scan(&i.Name, &i.Unique, &origin); err != nil { + return nil, fmt.Errorf("scanning index description %w", err) + } + i.primary = origin.String == "pk" + idx = append(idx, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + if err := rows.Close(); err != nil { + return nil, fmt.Errorf("closing rows %w", err) + } + for i := range idx { + columns, err := d.indexColumns(ctx, tx, idx[i].Name) + if err != nil { + return nil, err + } + idx[i].columns = columns + // Normalize implicit index names to ent naming convention. See: + // https://github.com/sqlite/sqlite/blob/e937df8/src/build.c#L3583 + if len(columns) == 1 && strings.HasPrefix(idx[i].Name, "sqlite_autoindex_"+name) { + idx[i].Name = columns[0] + } + } + return idx, nil +} + +// indexColumns loads index columns from index info. +func (d *SQLite) indexColumns(ctx context.Context, tx dialect.Tx, name string) ([]string, error) { + rows := &sql.Rows{} + query, args := sql.Select("name"). + From(sql.Table(fmt.Sprintf("pragma_index_info('%s')", name)).Unquote()). + OrderBy("seqno"). + Query() + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, fmt.Errorf("reading table indexes %w", err) + } + defer rows.Close() + var names []string + if err := sql.ScanSlice(rows, &names); err != nil { + return nil, err + } + return names, nil +} + +// scanColumn scans the column information from SQLite column description. +func (d *SQLite) scanColumn(c *Column, rows *sql.Rows) error { + var ( + pk sql.NullInt64 + notnull sql.NullInt64 + defaults sql.NullString + ) + if err := rows.Scan(&c.Name, &c.typ, ¬null, &defaults, &pk); err != nil { + return fmt.Errorf("scanning column description: %w", err) + } + c.Nullable = notnull.Int64 == 0 + if pk.Int64 > 0 { + c.Key = PrimaryKey + } + if c.typ == "" { + return fmt.Errorf("missing type information for column %q", c.Name) + } + parts, size, _, err := parseColumn(c.typ) + if err != nil { + return err + } + switch strings.ToLower(parts[0]) { + case "bool", "boolean": + c.Type = field.TypeBool + case "blob": + c.Type = field.TypeBytes + case "integer": + // All integer types have the same "type affinity". 
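+ // SQLite assigns INTEGER affinity to every integer declaration, so a
+ // scanned "integer" column always maps to a single Go-side int type: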
+ c.Type = field.TypeInt + case "real", "float", "double": + c.Type = field.TypeFloat64 + case "datetime": + c.Type = field.TypeTime + case "json": + c.Type = field.TypeJSON + case "uuid": + c.Type = field.TypeUUID + case "varchar", "char", "text": + c.Size = size + c.Type = field.TypeString + case "decimal", "numeric": + c.Type = field.TypeOther + } + if defaults.Valid { + return c.ScanDefault(defaults.String) + } + return nil +} + +// alterColumns returns the queries for applying the columns change-set. +func (d *SQLite) alterColumns(table string, add, _, _ []*Column) sql.Queries { + queries := make(sql.Queries, 0, len(add)) + for i := range add { + c := d.addColumn(add[i]) + if fk := add[i].foreign; fk != nil { + c.Constraint(fk.DSL()) + } + queries = append(queries, sql.Dialect(dialect.SQLite).AlterTable(table).AddColumn(c)) + } + // Modifying and dropping columns is not supported and disabled until we + // will support https://www.sqlite.org/lang_altertable.html#otheralter + return queries +} + +// tables returns the query for getting the in the schema. +func (d *SQLite) tables() sql.Querier { + return sql.Select("name"). + From(sql.Table("sqlite_schema")). + Where(sql.EQ("type", "table")) +} + +// needsConversion reports if column "old" needs to be converted +// (by table altering) to column "new". +func (d *SQLite) needsConversion(old, new *Column) bool { + c1, c2 := d.cType(old), d.cType(new) + return c1 != c2 && old.typ != c2 +} + +// Atlas integration. + +func (d *SQLite) atOpen(conn dialect.ExecQuerier) (migrate.Driver, error) { + return sqlite.Open(&db{ExecQuerier: conn}) +} + +func (d *SQLite) atTable(t1 *Table, t2 *schema.Table) { + if t1.Annotation != nil { + setAtChecks(t1, t2) + } +} + +func (d *SQLite) supportsDefault(*Column) bool { + // SQLite supports default values for all standard types. + return true +} + +func (d *SQLite) atTypeC(c1 *Column, c2 *schema.Column) error { + if c1.SchemaType != nil && c1.SchemaType[dialect.SQLite] != "" { + t, err := sqlite.ParseType(strings.ToLower(c1.SchemaType[dialect.SQLite])) + if err != nil { + return err + } + c2.Type.Type = t + return nil + } + var t schema.Type + switch c1.Type { + case field.TypeBool: + t = &schema.BoolType{T: "bool"} + case field.TypeInt8, field.TypeUint8, field.TypeInt16, field.TypeUint16, field.TypeInt32, + field.TypeUint32, field.TypeUint, field.TypeInt, field.TypeInt64, field.TypeUint64: + t = &schema.IntegerType{T: sqlite.TypeInteger} + case field.TypeBytes: + t = &schema.BinaryType{T: sqlite.TypeBlob} + case field.TypeString, field.TypeEnum: + // SQLite does not impose any length restrictions on + // the length of strings, BLOBs or numeric values. + t = &schema.StringType{T: sqlite.TypeText} + case field.TypeFloat32, field.TypeFloat64: + t = &schema.FloatType{T: sqlite.TypeReal} + case field.TypeTime: + t = &schema.TimeType{T: "datetime"} + case field.TypeJSON: + t = &schema.JSONType{T: "json"} + case field.TypeUUID: + t = &sqlite.UUIDType{T: "uuid"} + case field.TypeOther: + t = &schema.UnsupportedType{T: c1.typ} + default: + t, err := sqlite.ParseType(strings.ToLower(c1.typ)) + if err != nil { + return err + } + c2.Type.Type = t + } + c2.Type.Type = t + return nil +} + +func (d *SQLite) atUniqueC(t1 *Table, c1 *Column, t2 *schema.Table, c2 *schema.Column) { + // For UNIQUE columns, SQLite create an implicit index named + // "sqlite_autoindex_
_". Ent uses the PostgreSQL approach + // in its migration, and name these indexes as "
__key". + for _, idx := range t1.Indexes { + // Index also defined explicitly, and will be add in atIndexes. + if idx.Unique && d.atImplicitIndexName(idx, t1, c1) { + return + } + } + t2.AddIndexes(schema.NewUniqueIndex(fmt.Sprintf("%s_%s_key", t2.Name, c1.Name)).AddColumns(c2)) +} + +func (d *SQLite) atImplicitIndexName(idx *Index, t1 *Table, c1 *Column) bool { + if idx.Name == c1.Name { + return true + } + p := fmt.Sprintf("sqlite_autoindex_%s_", t1.Name) + if !strings.HasPrefix(idx.Name, p) { + return false + } + i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, p), 10, 64) + return err == nil && i > 0 +} + +func (d *SQLite) atIncrementC(t *schema.Table, c *schema.Column) { + if c.Default != nil { + t.Attrs = removeAttr(t.Attrs, reflect.TypeOf(&sqlite.AutoIncrement{})) + } else { + c.AddAttrs(&sqlite.AutoIncrement{}) + } +} + +func (d *SQLite) atIncrementT(t *schema.Table, v int64) { + t.AddAttrs(&sqlite.AutoIncrement{Seq: v}) +} + +func (d *SQLite) atIndex(idx1 *Index, t2 *schema.Table, idx2 *schema.Index) error { + for _, c1 := range idx1.Columns { + c2, ok := t2.Column(c1.Name) + if !ok { + return fmt.Errorf("unexpected index %q column: %q", idx1.Name, c1.Name) + } + idx2.AddParts(&schema.IndexPart{C: c2}) + } + if idx1.Annotation != nil && idx1.Annotation.Where != "" { + idx2.AddAttrs(&sqlite.IndexPredicate{P: idx1.Annotation.Where}) + } + return nil +} + +func (*SQLite) atTypeRangeSQL(ts ...string) string { + for i := range ts { + ts[i] = fmt.Sprintf("('%s')", ts[i]) + } + return fmt.Sprintf("INSERT INTO `%s` (`type`) VALUES %s", TypeTable, strings.Join(ts, ", ")) +} + +type tx struct { + dialect.Tx +} + +func (tx *tx) QueryContext(ctx context.Context, query string, args ...any) (*stdsql.Rows, error) { + rows := &sql.Rows{} + if err := tx.Query(ctx, query, args, rows); err != nil { + return nil, err + } + return rows.ColumnScanner.(*stdsql.Rows), nil +} + +func (tx *tx) ExecContext(ctx context.Context, query string, args ...any) (stdsql.Result, error) { + var r stdsql.Result + if err := tx.Exec(ctx, query, args, &r); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/entgo.io/ent/dialect/sql/schema/writer.go b/vendor/entgo.io/ent/dialect/sql/schema/writer.go new file mode 100644 index 00000000..611caacb --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/schema/writer.go @@ -0,0 +1,365 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "time" + "unicode" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + + "ariga.io/atlas/sql/migrate" +) + +type ( + // WriteDriver is a driver that writes all driver exec operations to its writer. + // Note that this driver is used only for printing or writing statements to SQL + // files, and may require manual changes to the generated SQL statements. + WriteDriver struct { + dialect.Driver // optional driver for query calls. + io.Writer // target for exec statements. + FormatFunc func(string) (string, error) + } + // DirWriter implements the io.Writer interface + // for writing to an Atlas managed directory. + DirWriter struct { + Dir migrate.Dir // target directory. + Formatter migrate.Formatter // optional formatter. + b bytes.Buffer // working buffer. + changes []*migrate.Change // changes to flush. 
+ } +) + +// Write implements the io.Writer interface. +func (d *DirWriter) Write(p []byte) (int, error) { + return d.b.Write(trimReturning(p)) +} + +// Change converts all written statement so far into a migration +// change with the given comment. +func (d *DirWriter) Change(comment string) { + // Trim semicolon and new line, because formatter adds it. + d.changes = append(d.changes, &migrate.Change{Comment: comment, Cmd: strings.TrimRight(d.b.String(), ";\n")}) + d.b.Reset() +} + +// Flush flushes the written statements to the directory. +func (d *DirWriter) Flush(name string) error { + switch { + case d.b.Len() != 0: + return fmt.Errorf("writer has undocumented change. Use Change or FlushChange instead") + case len(d.changes) == 0: + return errors.New("writer has no changes to flush") + default: + return migrate.NewPlanner(nil, d.Dir, migrate.PlanFormat(d.Formatter)). + WritePlan(&migrate.Plan{ + Name: name, + Changes: d.changes, + }) + } +} + +// FlushChange combines Change and Flush. +func (d *DirWriter) FlushChange(name, comment string) error { + d.Change(comment) + return d.Flush(name) +} + +// NewWriteDriver creates a dialect.Driver that writes all driver exec statement to its writer. +func NewWriteDriver(dialect string, w io.Writer) *WriteDriver { + return &WriteDriver{ + Writer: w, + Driver: nopDriver{dialect: dialect}, + } +} + +// Exec implements the dialect.Driver.Exec method. +func (w *WriteDriver) Exec(_ context.Context, query string, args, res any) error { + if rr, ok := res.(*sql.Result); ok { + *rr = noResult{} + } + if !strings.HasSuffix(query, ";") { + query += ";" + } + if args != nil { + args, ok := args.([]any) + if !ok { + return fmt.Errorf("unexpected args type: %T", args) + } + query = w.expandArgs(query, args) + } + _, err := io.WriteString(w, query+"\n") + return err +} + +// Query implements the dialect.Driver.Query method. +func (w *WriteDriver) Query(ctx context.Context, query string, args, res any) error { + if strings.HasPrefix(query, "INSERT") || strings.HasPrefix(query, "UPDATE") { + if err := w.Exec(ctx, query, args, nil); err != nil { + return err + } + if rr, ok := res.(*sql.Rows); ok { + cols := func() []string { + // If the query has a RETURNING clause, mock the result. + var clause string + outer: + for i := 0; i < len(query); i++ { + switch q := query[i]; { + case q == '\'', q == '"', q == '`': // string or identifier + _, skip := skipQuoted(query, i) + if skip == -1 { + return nil // malformed SQL + } + i = skip + continue + case reReturning.MatchString(query[i:]): + var j int + inner: + // Forward until next unquoted ';' appears, or we reach the end of the query. + for j = i; j < len(query); j++ { + switch query[j] { + case '\'', '"', '`': // string or identifier + _, skip := skipQuoted(query, j) + if skip == -1 { + return nil // malformed RETURNING clause + } + j = skip + case ';': + break inner + } + } + clause = query[i:j] + break outer + } + } + cols := strings.Split(reReturning.ReplaceAllString(clause, ""), ",") + for i := range cols { + cols[i] = strings.TrimSpace(cols[i]) + } + return cols + }() + *rr = sql.Rows{ColumnScanner: &noRows{cols: cols}} + } + return nil + } + switch w.Driver.(type) { + case nil, nopDriver: + return errors.New("query is not supported by the WriteDriver") + default: + return w.Driver.Query(ctx, query, args, res) + } +} + +// expandArgs combines to arguments and statement into a single statement to +// print or write into a file (before editing). 
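+// For example, with Postgres placeholders (an illustrative call, not part of
+// the vendored source), expandArgs(`UPDATE "users" SET "name" = $1`, []any{"a8m"})
+// returns: UPDATE "users" SET "name" = 'a8m'.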
+// Note, the output may be incorrect or unsafe SQL and require manual changes. +func (w *WriteDriver) expandArgs(query string, args []any) string { + var ( + b strings.Builder + p = w.placeholder() + scan = w.scanPlaceholder() + ) + for i := 0; i < len(query); i++ { + Top: + switch query[i] { + case p: + idx, size := scan(query[i+1:]) + // Unrecognized placeholder. + if idx < 0 || idx >= len(args) { + return query + } + i += size + v, err := w.formatArg(args[idx]) + if err != nil { + // Unexpected formatting error. + return query + } + b.WriteString(v) + // String or identifier. + case '\'', '"', '`': + for j := i + 1; j < len(query); j++ { + switch query[j] { + case '\\': + j++ + case query[i]: + b.WriteString(query[i : j+1]) + i = j + break Top + } + } + // Unexpected EOS. + return query + default: + b.WriteByte(query[i]) + } + } + return b.String() +} + +func (w *WriteDriver) scanPlaceholder() func(string) (int, int) { + switch w.Dialect() { + case dialect.Postgres: + return func(s string) (int, int) { + var i int + for i < len(s) && unicode.IsDigit(rune(s[i])) { + i++ + } + idx, err := strconv.ParseInt(s[:i], 10, 64) + if err != nil { + return -1, 0 + } + // Placeholders are 1-based. + return int(idx) - 1, i + } + default: + idx := -1 + return func(string) (int, int) { + idx++ + return idx, 0 + } + } +} + +func (w *WriteDriver) placeholder() byte { + if w.Dialect() == dialect.Postgres { + return '$' + } + return '?' +} + +func (w *WriteDriver) formatArg(v any) (string, error) { + if w.FormatFunc != nil { + return w.FormatFunc(fmt.Sprint(v)) + } + switch v := v.(type) { + case nil: + return "NULL", nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("%d", v), nil + case float32, float64: + return fmt.Sprintf("%g", v), nil + case bool: + if v { + return "1", nil + } else { + return "0", nil + } + case string: + return "'" + strings.ReplaceAll(v, "'", "''") + "'", nil + case json.RawMessage: + return "'" + strings.ReplaceAll(string(v), "'", "''") + "'", nil + case []byte: + return "{{ BINARY_VALUE }}", nil + case time.Time: + return "{{ TIME_VALUE }}", nil + case fmt.Stringer: + return "'" + strings.ReplaceAll(v.String(), "'", "''") + "'", nil + default: + return "{{ VALUE }}", nil + } +} + +var reReturning = regexp.MustCompile(`(?i)^\s?RETURNING`) + +// trimReturning trims any RETURNING suffix from INSERT/UPDATE queries. +// Note, that the output may be incorrect or unsafe SQL and require manual changes. +func trimReturning(query []byte) []byte { + var b bytes.Buffer +loop: + for i := 0; i < len(query); i++ { + switch q := query[i]; { + case q == '\'', q == '"', q == '`': // string or identifier + s, skip := skipQuoted(query, i) + if skip == -1 { + return query + } + b.Write(s) + i = skip + continue + case reReturning.Match(query[i:]): + // Forward until next unquoted ';' appears. + for j := i; j < len(query); j++ { // skip "RETURNING" + switch query[j] { + case '\'', '"', '`': // string or identifier + _, skip := skipQuoted(query, j) + if skip == -1 { + return query + } + j = skip + case ';': + b.WriteString(";") + i += j + continue loop + } + } + } + b.WriteByte(query[i]) + } + return b.Bytes() +} + +func skipQuoted[T []byte | string](query T, idx int) (T, int) { + for j := idx + 1; j < len(query); j++ { + switch query[j] { + case '\\': + j++ + case query[idx]: + return query[idx : j+1], j + } + } + // Unexpected EOS. + return query, -1 +} + +// Tx writes the transaction start. 
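+// The returned Tx is a no-op wrapper: statements executed through it are
+// written to the underlying writer as usual, and Commit/Rollback do nothing.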
+func (w *WriteDriver) Tx(context.Context) (dialect.Tx, error) {
+	return dialect.NopTx(w), nil
+}
+
+// noResult represents a zero result.
+type noResult struct{}
+
+func (noResult) LastInsertId() (int64, error) { return 0, nil }
+func (noResult) RowsAffected() (int64, error) { return 0, nil }
+
+// noRows represents no rows.
+type noRows struct {
+	sql.ColumnScanner
+	cols []string
+	done bool
+}
+
+func (*noRows) Close() error { return nil }
+func (*noRows) Err() error   { return nil }
+func (r *noRows) Next() bool {
+	if !r.done {
+		r.done = true
+		return true
+	}
+	return false
+}
+func (r *noRows) Columns() ([]string, error) { return r.cols, nil }
+func (*noRows) Scan(...any) error            { return nil }
+
+type nopDriver struct {
+	dialect.Driver
+	dialect string
+}
+
+func (d nopDriver) Dialect() string { return d.dialect }
+
+func (nopDriver) Query(context.Context, string, any, any) error {
+	return nil
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/sql.go b/vendor/entgo.io/ent/dialect/sql/sql.go
new file mode 100644
index 00000000..117f1a67
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/sql.go
@@ -0,0 +1,334 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sql
+
+import (
+	"fmt"
+	"strings"
+)
+
+// The following helpers exist to simplify the way raw predicates
+// are defined and used in both ent/schema and generated code. For
+// the full predicates API, check out sql.P in builder.go.
+
+// FieldIsNull returns a raw predicate to check if the given field is NULL.
+func FieldIsNull(name string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(IsNull(s.C(name)))
+	}
+}
+
+// FieldNotNull returns a raw predicate to check if the given field is not NULL.
+func FieldNotNull(name string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(NotNull(s.C(name)))
+	}
+}
+
+// FieldEQ returns a raw predicate to check if the given field equals the given value.
+func FieldEQ(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(EQ(s.C(name), v))
+	}
+}
+
+// FieldsEQ returns a raw predicate to check if the given fields (columns) are equal.
+func FieldsEQ(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsEQ(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldNEQ returns a raw predicate to check if the given field does not equal the given value.
+func FieldNEQ(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(NEQ(s.C(name), v))
+	}
+}
+
+// FieldsNEQ returns a raw predicate to check if the given fields (columns) are not equal.
+func FieldsNEQ(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsNEQ(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldGT returns a raw predicate to check if the given field is greater than the given value.
+func FieldGT(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(GT(s.C(name), v))
+	}
+}
+
+// FieldsGT returns a raw predicate to check if field1 is greater than field2.
+func FieldsGT(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsGT(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldGTE returns a raw predicate to check if the given field is greater than or equal to the given value.
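+//
+// For illustration, a minimal sketch of how these raw predicates compose with
+// a selector (table and column names are made up):
+//
+//	s := Select("*").From(Table("users"))
+//	FieldGTE("age", 18)(s)
+//	query, args := s.Query() // e.g. SELECT * FROM `users` WHERE `users`.`age` >= ? with args [18]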
+func FieldGTE(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(GTE(s.C(name), v))
+	}
+}
+
+// FieldsGTE returns a raw predicate to check if field1 is greater than or equal to field2.
+func FieldsGTE(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsGTE(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldLT returns a raw predicate to check if the value of the field is less than the given value.
+func FieldLT(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(LT(s.C(name), v))
+	}
+}
+
+// FieldsLT returns a raw predicate to check if field1 is less than field2.
+func FieldsLT(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsLT(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldLTE returns a raw predicate to check if the value of the field is less than or equal to the given value.
+func FieldLTE(name string, v any) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(LTE(s.C(name), v))
+	}
+}
+
+// FieldsLTE returns a raw predicate to check if field1 is less than or equal to field2.
+func FieldsLTE(field1, field2 string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ColumnsLTE(s.C(field1), s.C(field2)))
+	}
+}
+
+// FieldIn returns a raw predicate to check if the value of the field is IN the given values.
+func FieldIn[T any](name string, vs ...T) func(*Selector) {
+	return func(s *Selector) {
+		v := make([]any, len(vs))
+		for i := range v {
+			v[i] = vs[i]
+		}
+		s.Where(In(s.C(name), v...))
+	}
+}
+
+// FieldNotIn returns a raw predicate to check if the value of the field is NOT IN the given values.
+func FieldNotIn[T any](name string, vs ...T) func(*Selector) {
+	return func(s *Selector) {
+		v := make([]any, len(vs))
+		for i := range v {
+			v[i] = vs[i]
+		}
+		s.Where(NotIn(s.C(name), v...))
+	}
+}
+
+// FieldEqualFold returns a raw predicate to check if the field equals the given string under case-folding.
+func FieldEqualFold(name string, substr string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(EqualFold(s.C(name), substr))
+	}
+}
+
+// FieldHasPrefix returns a raw predicate to check if the field has the given prefix.
+func FieldHasPrefix(name string, prefix string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(HasPrefix(s.C(name), prefix))
+	}
+}
+
+// FieldHasSuffix returns a raw predicate to check if the field has the given suffix.
+func FieldHasSuffix(name string, suffix string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(HasSuffix(s.C(name), suffix))
+	}
+}
+
+// FieldContains returns a raw predicate to check if the field contains the given substring.
+func FieldContains(name string, substr string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(Contains(s.C(name), substr))
+	}
+}
+
+// FieldContainsFold returns a raw predicate to check if the field contains the given substring with case-folding.
+func FieldContainsFold(name string, substr string) func(*Selector) {
+	return func(s *Selector) {
+		s.Where(ContainsFold(s.C(name), substr))
+	}
+}
+
+// ColumnCheck is a function that verifies whether the
+// specified column exists within the given table.
+type ColumnCheck func(table, column string) error
+
+// NewColumnCheck returns a function that verifies whether the specified column exists
+// within the given table. This function is utilized by the generated code to validate
+// column names in ordering functions.
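+//
+// An illustrative sketch (table and column names are hypothetical):
+//
+//	check := NewColumnCheck(map[string]func(string) bool{
+//		"users": func(c string) bool { return c == "id" || c == "name" },
+//	})
+//	_ = check("users", "name") // nil
+//	_ = check("users", "age")  // unknown column "age" for table "users"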
+func NewColumnCheck(checks map[string]func(string) bool) ColumnCheck { + return func(table, column string) error { + check, ok := checks[table] + if !ok { + return fmt.Errorf("unknown table %q", table) + } + if !check(column) { + return fmt.Errorf("unknown column %q for table %q", column, table) + } + return nil + } +} + +type ( + // OrderFieldTerm represents an ordering by a field. + OrderFieldTerm struct { + OrderTermOptions + Field string // Field name. + } + // OrderExprTerm represents an ordering by an expression. + OrderExprTerm struct { + OrderTermOptions + Expr func(*Selector) Querier // Expression. + } + // OrderTerm represents an ordering by a term. + OrderTerm interface { + term() + } + // OrderTermOptions represents options for ordering by a term. + OrderTermOptions struct { + Desc bool // Whether to sort in descending order. + As string // Optional alias. + Selected bool // Whether the term should be selected. + NullsFirst bool // Whether to sort nulls first. + NullsLast bool // Whether to sort nulls last. + } + // OrderTermOption is an option for ordering by a term. + OrderTermOption func(*OrderTermOptions) +) + +// OrderDesc returns an option to sort in descending order. +func OrderDesc() OrderTermOption { + return func(o *OrderTermOptions) { + o.Desc = true + } +} + +// OrderAsc returns an option to sort in ascending order. +func OrderAsc() OrderTermOption { + return func(o *OrderTermOptions) { + o.Desc = false + } +} + +// OrderAs returns an option to set the alias for the ordering. +func OrderAs(as string) OrderTermOption { + return func(o *OrderTermOptions) { + o.As = as + } +} + +// OrderSelected returns an option to select the ordering term. +func OrderSelected() OrderTermOption { + return func(o *OrderTermOptions) { + o.Selected = true + } +} + +// OrderSelectAs returns an option to set and select the alias for the ordering. +func OrderSelectAs(as string) OrderTermOption { + return func(o *OrderTermOptions) { + o.As = as + o.Selected = true + } +} + +// OrderNullsFirst returns an option to sort nulls first. +func OrderNullsFirst() OrderTermOption { + return func(o *OrderTermOptions) { + o.NullsFirst = true + } +} + +// OrderNullsLast returns an option to sort nulls last. +func OrderNullsLast() OrderTermOption { + return func(o *OrderTermOptions) { + o.NullsLast = true + } +} + +// NewOrderTermOptions returns a new OrderTermOptions from the given options. +func NewOrderTermOptions(opts ...OrderTermOption) *OrderTermOptions { + o := &OrderTermOptions{} + for _, opt := range opts { + opt(o) + } + return o +} + +// OrderByField returns an ordering by the given field. +func OrderByField(field string, opts ...OrderTermOption) *OrderFieldTerm { + return &OrderFieldTerm{Field: field, OrderTermOptions: *NewOrderTermOptions(opts...)} +} + +// OrderBySum returns an ordering by the sum of the given field. +func OrderBySum(field string, opts ...OrderTermOption) *OrderExprTerm { + return orderByAgg("SUM", field, opts...) +} + +// OrderByCount returns an ordering by the count of the given field. +func OrderByCount(field string, opts ...OrderTermOption) *OrderExprTerm { + return orderByAgg("COUNT", field, opts...) +} + +// orderByAgg returns an ordering by the aggregation of the given field. +func orderByAgg(fn, field string, opts ...OrderTermOption) *OrderExprTerm { + return &OrderExprTerm{ + OrderTermOptions: *NewOrderTermOptions( + append( + // Default alias is "_". 
+ []OrderTermOption{OrderAs(fmt.Sprintf("%s_%s", strings.ToLower(fn), field))}, + opts..., + )..., + ), + Expr: func(s *Selector) Querier { + var c string + switch { + case field == "*", isFunc(field): + c = field + default: + c = s.C(field) + } + return Raw(fmt.Sprintf("%s(%s)", fn, c)) + }, + } +} + +// ToFunc returns a function that sets the ordering on the given selector. +// This is used by the generated code. +func (f *OrderFieldTerm) ToFunc() func(*Selector) { + return func(s *Selector) { + s.OrderExprFunc(func(b *Builder) { + b.WriteString(s.C(f.Field)) + if f.Desc { + b.WriteString(" DESC") + } + if f.NullsFirst { + b.WriteString(" NULLS FIRST") + } else if f.NullsLast { + b.WriteString(" NULLS LAST") + } + }) + } +} + +func (OrderFieldTerm) term() {} +func (OrderExprTerm) term() {} diff --git a/vendor/entgo.io/ent/dialect/sql/sqlgraph/BUILD b/vendor/entgo.io/ent/dialect/sql/sqlgraph/BUILD new file mode 100644 index 00000000..ca7e49d5 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqlgraph/BUILD @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlgraph", + srcs = [ + "entql.go", + "errors.go", + "graph.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect/sql/sqlgraph", + importpath = "entgo.io/ent/dialect/sql/sqlgraph", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect", + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/entgo.io/ent/entql", + "//vendor/entgo.io/ent/schema/field", + ], +) diff --git a/vendor/entgo.io/ent/dialect/sql/sqlgraph/entql.go b/vendor/entgo.io/ent/dialect/sql/sqlgraph/entql.go new file mode 100644 index 00000000..407cc10c --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqlgraph/entql.go @@ -0,0 +1,334 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlgraph + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/entql" +) + +type ( + // A Schema holds a representation of ent/schema at runtime. Each Node + // represents a single schema-type and its relations in the graph (storage). + // + // It is used for translating common graph traversal operations to the + // underlying SQL storage. For example, an operation like `has_edge(E)`, + // will be translated to an SQL lookup based on the relation type and the + // FK configuration. + Schema struct { + Nodes []*Node + } + + // A Node in the graph holds the SQL information for an ent/schema. + Node struct { + NodeSpec + + // Type holds the node type (schema name). + Type string + + // Fields maps from field names to their spec. + Fields map[string]*FieldSpec + + // Edges maps from edge names to their spec. + Edges map[string]struct { + To *Node + Spec *EdgeSpec + } + } +) + +// AddE adds an edge to the graph. It fails, if one of the node +// types is missing. 
+//
+//	g.AddE("pets", spec, "user", "pet")
+//	g.AddE("friends", spec, "user", "user")
+func (g *Schema) AddE(name string, spec *EdgeSpec, from, to string) error {
+	var fromT, toT *Node
+	for i := range g.Nodes {
+		t := g.Nodes[i].Type
+		if t == from {
+			fromT = g.Nodes[i]
+		}
+		if t == to {
+			toT = g.Nodes[i]
+		}
+	}
+	if fromT == nil || toT == nil {
+		return fmt.Errorf("from/to type was not found")
+	}
+	if fromT.Edges == nil {
+		fromT.Edges = make(map[string]struct {
+			To   *Node
+			Spec *EdgeSpec
+		})
+	}
+	fromT.Edges[name] = struct {
+		To   *Node
+		Spec *EdgeSpec
+	}{
+		To:   toT,
+		Spec: spec,
+	}
+	return nil
+}
+
+// MustAddE is like AddE but panics if the edge cannot be added to the graph.
+func (g *Schema) MustAddE(name string, spec *EdgeSpec, from, to string) {
+	if err := g.AddE(name, spec, from, to); err != nil {
+		panic(err)
+	}
+}
+
+// EvalP evaluates the entql predicate on the given selector (query builder).
+func (g *Schema) EvalP(nodeType string, p entql.P, selector *sql.Selector) error {
+	var node *Node
+	for i := range g.Nodes {
+		if g.Nodes[i].Type == nodeType {
+			node = g.Nodes[i]
+			break
+		}
+	}
+	if node == nil {
+		return fmt.Errorf("node %s was not found in the graph schema", nodeType)
+	}
+	pr, err := evalExpr(node, selector, p)
+	if err != nil {
+		return err
+	}
+	selector.Where(pr)
+	return nil
+}
+
+// FuncSelector represents a selector function to be used as an entql foreign-function.
+const FuncSelector entql.Func = "func_selector"
+
+// wrappedFunc wraps the selector-function to an ent-expression.
+type wrappedFunc struct {
+	entql.Expr
+	Func func(*sql.Selector)
+}
+
+// WrapFunc wraps a selector-func with an entql call expression.
+func WrapFunc(s func(*sql.Selector)) *entql.CallExpr {
+	return &entql.CallExpr{
+		Func: FuncSelector,
+		Args: []entql.Expr{wrappedFunc{Func: s}},
+	}
+}
+
+var (
+	binary = [...]sql.Op{
+		entql.OpEQ:    sql.OpEQ,
+		entql.OpNEQ:   sql.OpNEQ,
+		entql.OpGT:    sql.OpGT,
+		entql.OpGTE:   sql.OpGTE,
+		entql.OpLT:    sql.OpLT,
+		entql.OpLTE:   sql.OpLTE,
+		entql.OpIn:    sql.OpIn,
+		entql.OpNotIn: sql.OpNotIn,
+	}
+	nary = [...]func(...*sql.Predicate) *sql.Predicate{
+		entql.OpAnd: sql.And,
+		entql.OpOr:  sql.Or,
+	}
+	strFunc = map[entql.Func]func(string, string) *sql.Predicate{
+		entql.FuncContains:     sql.Contains,
+		entql.FuncContainsFold: sql.ContainsFold,
+		entql.FuncEqualFold:    sql.EqualFold,
+		entql.FuncHasPrefix:    sql.HasPrefix,
+		entql.FuncHasSuffix:    sql.HasSuffix,
+	}
+	nullFunc = [...]func(string) *sql.Predicate{
+		entql.OpEQ:  sql.IsNull,
+		entql.OpNEQ: sql.NotNull,
+	}
+)
+
+// state represents the state of a predicate evaluation.
+// Note that the evaluation output is a predicate to be
+// applied on the database.
+type state struct {
+	sql.Builder
+	context  *Node
+	selector *sql.Selector
+}
+
+// evalExpr evaluates the entql expression and returns a new SQL predicate to be applied on the database.
+func evalExpr(context *Node, selector *sql.Selector, expr entql.Expr) (p *sql.Predicate, err error) {
+	ex := &state{
+		context:  context,
+		selector: selector,
+	}
+	defer catch(&err)
+	p = ex.evalExpr(expr)
+	return
+}
+
+// evalExpr evaluates any expression.
+func (e *state) evalExpr(expr entql.Expr) *sql.Predicate {
+	switch expr := expr.(type) {
+	case *entql.BinaryExpr:
+		return e.evalBinary(expr)
+	case *entql.UnaryExpr:
+		return sql.Not(e.evalExpr(expr.X))
+	case *entql.NaryExpr:
+		ps := make([]*sql.Predicate, len(expr.Xs))
+		for i, x := range expr.Xs {
+			ps[i] = e.evalExpr(x)
+		}
+		return nary[expr.Op](ps...)
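+	// Function calls: string predicates (contains, prefix/suffix,
+	// case-folding) and has-edge/has-edge-with traversals.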
+	case *entql.CallExpr:
+		switch expr.Func {
+		case entql.FuncHasPrefix, entql.FuncHasSuffix, entql.FuncContains, entql.FuncEqualFold, entql.FuncContainsFold:
+			expect(len(expr.Args) == 2, "invalid number of arguments for %s", expr.Func)
+			f, ok := expr.Args[0].(*entql.Field)
+			expect(ok, "*entql.Field, got %T", expr.Args[0])
+			v, ok := expr.Args[1].(*entql.Value)
+			expect(ok, "*entql.Value, got %T", expr.Args[1])
+			s, ok := v.V.(string)
+			expect(ok, "string value, got %T", v.V)
+			return strFunc[expr.Func](e.field(f), s)
+		case entql.FuncHasEdge:
+			expect(len(expr.Args) > 0, "invalid number of arguments for %s", expr.Func)
+			edge, ok := expr.Args[0].(*entql.Edge)
+			expect(ok, "*entql.Edge, got %T", expr.Args[0])
+			return e.evalEdge(edge.Name, expr.Args[1:]...)
+		}
+	}
+	panic("invalid")
+}
+
+// evalBinary evaluates binary expressions.
+func (e *state) evalBinary(expr *entql.BinaryExpr) *sql.Predicate {
+	switch expr.Op {
+	case entql.OpOr:
+		return sql.Or(e.evalExpr(expr.X), e.evalExpr(expr.Y))
+	case entql.OpAnd:
+		return sql.And(e.evalExpr(expr.X), e.evalExpr(expr.Y))
+	case entql.OpEQ, entql.OpNEQ:
+		if expr.Y == (*entql.Value)(nil) {
+			f, ok := expr.X.(*entql.Field)
+			expect(ok, "*entql.Field, got %T", expr.X)
+			return nullFunc[expr.Op](e.field(f))
+		}
+		fallthrough
+	default:
+		field, ok := expr.X.(*entql.Field)
+		expect(ok, "expr.X to be *entql.Field (got %T)", expr.X)
+		_, ok = expr.Y.(*entql.Field)
+		if !ok {
+			_, ok = expr.Y.(*entql.Value)
+		}
+		expect(ok, "expr.Y to be *entql.Field or *entql.Value (got %T)", expr.Y)
+		switch x := expr.Y.(type) {
+		case *entql.Field:
+			return sql.ColumnsOp(e.field(field), e.field(x), binary[expr.Op])
+		case *entql.Value:
+			c := e.field(field)
+			return sql.P(func(b *sql.Builder) {
+				b.Ident(c).WriteOp(binary[expr.Op])
+				args(b, x)
+			})
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+// evalEdge evaluates has-edge and has-edge-with calls.
+func (e *state) evalEdge(name string, exprs ...entql.Expr) *sql.Predicate {
+	edge, ok := e.context.Edges[name]
+	expect(ok, "edge %q was not found for node %q", name, e.context.Type)
+	var fromC, toC string
+	switch {
+	case edge.To.ID != nil:
+		toC = edge.To.ID.Column
+	// Edge-owner points to its edge schema.
+	case edge.To.CompositeID != nil && !edge.Spec.Inverse:
+		toC = edge.To.CompositeID[0].Column
+	// Edge-backref points to its edge schema.
+	case edge.To.CompositeID != nil && edge.Spec.Inverse:
+		toC = edge.To.CompositeID[1].Column
+	default:
+		panic(evalError{fmt.Sprintf("expect id definition for edge %q", name)})
+	}
+	switch {
+	case e.context.ID != nil:
+		fromC = e.context.ID.Column
+	case e.context.CompositeID != nil && (edge.Spec.Rel == M2O || (edge.Spec.Rel == O2O && edge.Spec.Inverse)):
+		// An edge-schema with a composite id can query
+		// only edges that it owns (holds the foreign-key).
+ default: + panic(evalError{fmt.Sprintf("unexpected edge-query from an edge-schema %q", e.context.Type)}) + } + step := NewStep( + From(e.context.Table, fromC), + To(edge.To.Table, toC), + Edge(edge.Spec.Rel, edge.Spec.Inverse, edge.Spec.Table, edge.Spec.Columns...), + ) + selector := e.selector.Clone().SetP(nil) + selector.SetTotal(e.Total()) + if len(exprs) == 0 { + HasNeighbors(selector, step) + return selector.P() + } + HasNeighborsWith(selector, step, func(s *sql.Selector) { + for _, expr := range exprs { + if cx, ok := expr.(*entql.CallExpr); ok && cx.Func == FuncSelector { + expect(len(cx.Args) == 1, "invalid number of arguments for %s", FuncSelector) + wrapped, ok := cx.Args[0].(wrappedFunc) + expect(ok, "invalid argument for %s: %T", FuncSelector, cx.Args[0]) + wrapped.Func(s) + } else { + p, err := evalExpr(edge.To, s, expr) + expect(err == nil, "edge evaluation failed for %s->%s: %s", e.context.Type, name, err) + s.Where(p) + } + } + }) + return selector.P() +} + +func (e *state) field(f *entql.Field) string { + _, ok := e.context.Fields[f.Name] + expect(ok || e.context.ID.Column == f.Name, "field %q was not found for node %q", f.Name, e.context.Type) + return e.selector.C(f.Name) +} + +func args(b *sql.Builder, v *entql.Value) { + vs, ok := v.V.([]any) + if !ok { + b.Arg(v.V) + return + } + b.WriteByte('(').Args(vs...).WriteByte(')') +} + +// expect panics if the condition is false. +func expect(cond bool, msg string, args ...any) { + if !cond { + panic(evalError{fmt.Sprintf("expect "+msg, args...)}) + } +} + +type evalError struct { + msg string +} + +func (p evalError) Error() string { + return fmt.Sprintf("sqlgraph: %s", p.msg) +} + +func catch(err *error) { + if e := recover(); e != nil { + xerr, ok := e.(evalError) + if !ok { + panic(e) + } + *err = xerr + } +} diff --git a/vendor/entgo.io/ent/dialect/sql/sqlgraph/errors.go b/vendor/entgo.io/ent/dialect/sql/sqlgraph/errors.go new file mode 100644 index 00000000..40265adf --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqlgraph/errors.go @@ -0,0 +1,47 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqlgraph + +import ( + "errors" + "strings" +) + +// IsConstraintError returns true if the error resulted from a database constraint violation. +func IsConstraintError(err error) bool { + var e *ConstraintError + return errors.As(err, &e) || IsUniqueConstraintError(err) || IsForeignKeyConstraintError(err) +} + +// IsUniqueConstraintError reports if the error resulted from a DB uniqueness constraint violation. +// e.g. duplicate value in unique index. +func IsUniqueConstraintError(err error) bool { + for _, s := range []string{ + "Error 1062", // MySQL + "violates unique constraint", // Postgres + "UNIQUE constraint failed", // SQLite + } { + if strings.Contains(err.Error(), s) { + return true + } + } + return false +} + +// IsForeignKeyConstraintError reports if the error resulted from a database foreign-key constraint violation. +// e.g. parent row does not exist. +func IsForeignKeyConstraintError(err error) bool { + for _, s := range []string{ + "Error 1451", // MySQL (Cannot delete or update a parent row). + "Error 1452", // MySQL (Cannot add or update a child row). 
+ "violates foreign key constraint", // Postgres + "FOREIGN KEY constraint failed", // SQLite + } { + if strings.Contains(err.Error(), s) { + return true + } + } + return false +} diff --git a/vendor/entgo.io/ent/dialect/sql/sqlgraph/graph.go b/vendor/entgo.io/ent/dialect/sql/sqlgraph/graph.go new file mode 100644 index 00000000..bc4cfdd5 --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqlgraph/graph.go @@ -0,0 +1,1964 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +// Package sqlgraph provides graph abstraction capabilities on top +// of sql-based databases for ent codegen. +package sqlgraph + +import ( + "context" + "database/sql/driver" + "encoding/json" + "fmt" + "math" + "sort" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" +) + +// Rel is an edge relation type. +type Rel int + +// Relation types. +const ( + _ Rel = iota // Unknown. + O2O // One to one / has one. + O2M // One to many / has many. + M2O // Many to one (inverse perspective for O2M). + M2M // Many to many. +) + +// String returns the relation name. +func (r Rel) String() (s string) { + switch r { + case O2O: + s = "O2O" + case O2M: + s = "O2M" + case M2O: + s = "M2O" + case M2M: + s = "M2M" + default: + s = "Unknown" + } + return s +} + +// A ConstraintError represents an error from mutation that violates a specific constraint. +type ConstraintError struct { + msg string +} + +func (e ConstraintError) Error() string { return e.msg } + +// A Step provides a path-step information to the traversal functions. +type Step struct { + // From is the source of the step. + From struct { + // V can be either one vertex or set of vertices. + // It can be a pre-processed step (sql.Query) or a simple Go type (integer or string). + V any + // Table holds the table name of V (from). + Table string + // Column to join with. Usually the "id" column. + Column string + } + // Edge holds the edge information for getting the neighbors. + Edge struct { + // Rel of the edge. + Rel Rel + // Schema is an optional name of the database + // where the table is defined. + Schema string + // Table name of where this edge columns reside. + Table string + // Columns of the edge. + // In O2O and M2O, it holds the foreign-key column. Hence, len == 1. + // In M2M, it holds the primary-key columns of the join table. Hence, len == 2. + Columns []string + // Inverse indicates if the edge is an inverse edge. + Inverse bool + } + // To is the dest of the path (the neighbors). + To struct { + // Table holds the table name of the neighbors (to). + Table string + // Schema is an optional name of the database + // where the table is defined. + Schema string + // Column to join with. Usually the "id" column. + Column string + } +} + +// StepOption allows configuring Steps using functional options. +type StepOption func(*Step) + +// From sets the source of the step. +func From(table, column string, v ...any) StepOption { + return func(s *Step) { + s.From.Table = table + s.From.Column = column + if len(v) > 0 { + s.From.V = v[0] + } + } +} + +// To sets the destination of the step. +func To(table, column string) StepOption { + return func(s *Step) { + s.To.Table = table + s.To.Column = column + } +} + +// Edge sets the edge info for getting the neighbors. 
+func Edge(rel Rel, inverse bool, table string, columns ...string) StepOption {
+	return func(s *Step) {
+		s.Edge.Rel = rel
+		s.Edge.Table = table
+		s.Edge.Columns = columns
+		s.Edge.Inverse = inverse
+	}
+}
+
+// NewStep gets a list of options and returns a configured step.
+//
+//	NewStep(
+//		From("table", "pk", V),
+//		To("table", "pk"),
+//		Edge(O2M, false, "table", "fk"),
+//	)
+func NewStep(opts ...StepOption) *Step {
+	s := &Step{}
+	for _, opt := range opts {
+		opt(s)
+	}
+	return s
+}
+
+// FromEdgeOwner returns true if the step is from an edge owner.
+// i.e., from the table that holds the foreign-key.
+func (s *Step) FromEdgeOwner() bool {
+	return s.Edge.Rel == M2O || (s.Edge.Rel == O2O && s.Edge.Inverse)
+}
+
+// ToEdgeOwner returns true if the step is to an edge owner.
+// i.e., to the table that holds the foreign-key.
+func (s *Step) ToEdgeOwner() bool {
+	return s.Edge.Rel == O2M || (s.Edge.Rel == O2O && !s.Edge.Inverse)
+}
+
+// ThroughEdgeTable returns true if the step is through a join-table.
+func (s *Step) ThroughEdgeTable() bool {
+	return s.Edge.Rel == M2M
+}
+
+// Neighbors returns a Selector for evaluating the path-step
+// and getting the neighbors of one vertex.
+func Neighbors(dialect string, s *Step) (q *sql.Selector) {
+	builder := sql.Dialect(dialect)
+	switch {
+	case s.ThroughEdgeTable():
+		pk1, pk2 := s.Edge.Columns[1], s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1, pk2 = pk2, pk1
+		}
+		to := builder.Table(s.To.Table).Schema(s.To.Schema)
+		join := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		match := builder.Select(join.C(pk1)).
+			From(join).
+			Where(sql.EQ(join.C(pk2), s.From.V))
+		q = builder.Select().
+			From(to).
+			Join(match).
+			On(to.C(s.To.Column), match.C(pk1))
+	case s.FromEdgeOwner():
+		t1 := builder.Table(s.To.Table).Schema(s.To.Schema)
+		t2 := builder.Select(s.Edge.Columns[0]).
+			From(builder.Table(s.Edge.Table).Schema(s.Edge.Schema)).
+			Where(sql.EQ(s.From.Column, s.From.V))
+		q = builder.Select().
+			From(t1).
+			Join(t2).
+			On(t1.C(s.To.Column), t2.C(s.Edge.Columns[0]))
+	case s.ToEdgeOwner():
+		q = builder.Select().
+			From(builder.Table(s.To.Table).Schema(s.To.Schema)).
+			Where(sql.EQ(s.Edge.Columns[0], s.From.V))
+	}
+	return q
+}
+
+// SetNeighbors returns a Selector for evaluating the path-step
+// and getting the neighbors of a set of vertices.
+func SetNeighbors(dialect string, s *Step) (q *sql.Selector) {
+	set := s.From.V.(*sql.Selector)
+	builder := sql.Dialect(dialect)
+	switch {
+	case s.ThroughEdgeTable():
+		pk1, pk2 := s.Edge.Columns[1], s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1, pk2 = pk2, pk1
+		}
+		to := builder.Table(s.To.Table).Schema(s.To.Schema)
+		set.Select(set.C(s.From.Column))
+		join := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		match := builder.Select(join.C(pk1)).
+			From(join).
+			Join(set).
+			On(join.C(pk2), set.C(s.From.Column))
+		q = builder.Select().
+			From(to).
+			Join(match).
+			On(to.C(s.To.Column), match.C(pk1))
+	case s.FromEdgeOwner():
+		t1 := builder.Table(s.To.Table).Schema(s.To.Schema)
+		set.Select(set.C(s.Edge.Columns[0]))
+		q = builder.Select().
+			From(t1).
+			Join(set).
+			On(t1.C(s.To.Column), set.C(s.Edge.Columns[0]))
+	case s.ToEdgeOwner():
+		t1 := builder.Table(s.To.Table).Schema(s.To.Schema)
+		set.Select(set.C(s.From.Column))
+		q = builder.Select().
+			From(t1).
+			Join(set).
+			On(t1.C(s.Edge.Columns[0]), set.C(s.From.Column))
+	}
+	return q
+}
+
+// HasNeighbors applies a neighbors check on the given Selector.
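+//
+// A minimal sketch (names are hypothetical): keep only users that have posts:
+//
+//	s := sql.Dialect(dialect.Postgres).Select().From(sql.Table("users"))
+//	HasNeighbors(s, NewStep(
+//		From("users", "id"),
+//		To("posts", "id"),
+//		Edge(O2M, false, "posts", "author_id"),
+//	))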
+func HasNeighbors(q *sql.Selector, s *Step) {
+	builder := sql.Dialect(q.Dialect())
+	switch {
+	case s.ThroughEdgeTable():
+		pk1 := s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1 = s.Edge.Columns[1]
+		}
+		join := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		q.Where(
+			sql.In(
+				q.C(s.From.Column),
+				builder.Select(join.C(pk1)).From(join),
+			),
+		)
+	case s.FromEdgeOwner():
+		q.Where(sql.NotNull(q.C(s.Edge.Columns[0])))
+	case s.ToEdgeOwner():
+		to := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		// In case the edge resides on the same table, give
+		// the edge an alias to make the qualifier different.
+		if s.From.Table == s.Edge.Table {
+			to.As(fmt.Sprintf("%s_edge", s.Edge.Table))
+		}
+		q.Where(
+			sql.Exists(
+				builder.Select(to.C(s.Edge.Columns[0])).
+					From(to).
+					Where(
+						sql.ColumnsEQ(
+							q.C(s.From.Column),
+							to.C(s.Edge.Columns[0]),
+						),
+					),
+			),
+		)
+	}
+}
+
+// HasNeighborsWith applies a neighbors check on the given Selector.
+// The given predicate applies its filtering on the selector.
+func HasNeighborsWith(q *sql.Selector, s *Step, pred func(*sql.Selector)) {
+	builder := sql.Dialect(q.Dialect())
+	switch {
+	case s.ThroughEdgeTable():
+		pk1, pk2 := s.Edge.Columns[1], s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1, pk2 = pk2, pk1
+		}
+		to := builder.Table(s.To.Table).Schema(s.To.Schema)
+		edge := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		join := builder.Select(edge.C(pk2)).
+			From(edge).
+			Join(to).
+			On(edge.C(pk1), to.C(s.To.Column))
+		matches := builder.Select().From(to)
+		matches.WithContext(q.Context())
+		pred(matches)
+		join.FromSelect(matches)
+		q.Where(sql.In(q.C(s.From.Column), join))
+	case s.FromEdgeOwner():
+		to := builder.Table(s.To.Table).Schema(s.To.Schema)
+		matches := builder.Select(to.C(s.To.Column)).
+			From(to)
+		matches.WithContext(q.Context())
+		pred(matches)
+		q.Where(sql.In(q.C(s.Edge.Columns[0]), matches))
+	case s.ToEdgeOwner():
+		to := builder.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		matches := builder.Select(to.C(s.Edge.Columns[0])).
+			From(to)
+		matches.WithContext(q.Context())
+		pred(matches)
+		q.Where(sql.In(q.C(s.From.Column), matches))
+	}
+}
+
+// countAlias returns the alias to use for the count column.
+func countAlias(q *sql.Selector, s *Step, opt *sql.OrderTermOptions) string {
+	if opt.As != "" {
+		return opt.As
+	}
+	selected := make(map[string]struct{})
+	for _, c := range q.SelectedColumns() {
+		selected[c] = struct{}{}
+	}
+	column := fmt.Sprintf("count_%s", s.To.Table)
+	// If the column was already selected,
+	// try to find a free alias.
+	if _, ok := selected[column]; ok {
+		for i := 1; i <= 5; i++ {
+			ci := fmt.Sprintf("%s_%d", column, i)
+			if _, ok := selected[ci]; !ok {
+				return ci
+			}
+		}
+	}
+	return column
+}
+
+// OrderByNeighborsCount appends ordering based on the number of neighbors.
+// For example, order users by their number of posts.
+func OrderByNeighborsCount(q *sql.Selector, s *Step, opts ...sql.OrderTermOption) {
+	var (
+		join  *sql.Selector
+		opt   = sql.NewOrderTermOptions(opts...)
+		build = sql.Dialect(q.Dialect())
+	)
+	switch {
+	case s.FromEdgeOwner():
+		// For M2O and O2O inverse, the FK resides in the same table.
+		// Hence, the order by is on the nullability of the column.
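+		// That is, the emitted term is "<column> IS NULL" for ascending
+		// order, or "<column> IS NOT NULL" for descending order.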
+		x := func(b *sql.Builder) {
+			b.Ident(s.From.Column)
+			if opt.Desc {
+				b.WriteOp(sql.OpNotNull)
+			} else {
+				b.WriteOp(sql.OpIsNull)
+			}
+		}
+		q.OrderExpr(build.Expr(x))
+	case s.ThroughEdgeTable():
+		countAs := countAlias(q, s, opt)
+		terms := []sql.OrderTerm{
+			sql.OrderByCount("*", append([]sql.OrderTermOption{sql.OrderAs(countAs)}, opts...)...),
+		}
+		pk1 := s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1 = s.Edge.Columns[1]
+		}
+		joinT := build.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		join = build.Select(
+			joinT.C(pk1),
+		).From(joinT).GroupBy(joinT.C(pk1))
+		selectTerms(join, terms)
+		q.LeftJoin(join).
+			On(
+				q.C(s.From.Column),
+				join.C(pk1),
+			)
+		orderTerms(q, join, terms)
+	case s.ToEdgeOwner():
+		countAs := countAlias(q, s, opt)
+		terms := []sql.OrderTerm{
+			sql.OrderByCount("*", append([]sql.OrderTermOption{sql.OrderAs(countAs)}, opts...)...),
+		}
+		edgeT := build.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		join = build.Select(
+			edgeT.C(s.Edge.Columns[0]),
+		).From(edgeT).GroupBy(edgeT.C(s.Edge.Columns[0]))
+		selectTerms(join, terms)
+		q.LeftJoin(join).
+			On(
+				q.C(s.From.Column),
+				join.C(s.Edge.Columns[0]),
+			)
+		orderTerms(q, join, terms)
+	}
+}
+
+func orderTerms(q, join *sql.Selector, ts []sql.OrderTerm) {
+	for _, t := range ts {
+		t := t
+		var (
+			// Order by column or expression.
+			orderC string
+			orderX func(*sql.Selector) sql.Querier
+			// Order by options.
+			desc, nullsfirst, nullslast bool
+		)
+		switch t := t.(type) {
+		case *sql.OrderFieldTerm:
+			f := t.Field
+			if t.As != "" {
+				f = t.As
+			}
+			orderC = join.C(f)
+			if t.Selected {
+				q.AppendSelect(orderC)
+			}
+			desc = t.Desc
+			nullsfirst = t.NullsFirst
+			nullslast = t.NullsLast
+		case *sql.OrderExprTerm:
+			if t.As != "" {
+				orderC = join.C(t.As)
+				if t.Selected {
+					q.AppendSelect(orderC)
+				}
+			} else {
+				orderX = t.Expr
+			}
+			desc = t.Desc
+			nullsfirst = t.NullsFirst
+			nullslast = t.NullsLast
+		default:
+			continue
+		}
+		q.OrderExprFunc(func(b *sql.Builder) {
+			// Write the ORDER BY term.
+			switch {
+			case orderC != "":
+				b.WriteString(orderC)
+			case orderX != nil:
+				b.Join(orderX(join))
+			}
+			// Unlike MySQL and SQLite, PostgreSQL sorts NULL values as if larger than any other value.
+			// Therefore, we need to explicitly order NULLs first on ASC and last on DESC unless specified otherwise.
+			switch normalizePG := b.Dialect() == dialect.Postgres && !nullsfirst && !nullslast; {
+			case normalizePG && desc:
+				b.WriteString(" DESC NULLS LAST")
+			case normalizePG:
+				b.WriteString(" NULLS FIRST")
+			case desc:
+				b.WriteString(" DESC")
+			}
+			if nullsfirst {
+				b.WriteString(" NULLS FIRST")
+			} else if nullslast {
+				b.WriteString(" NULLS LAST")
+			}
+		})
+	}
+}
+
+// selectTerms appends the select terms to the joined query.
+// Afterward, the term aliases are utilized to order the root query.
+func selectTerms(q *sql.Selector, ts []sql.OrderTerm) {
+	for _, t := range ts {
+		switch t := t.(type) {
+		case *sql.OrderFieldTerm:
+			if t.As != "" {
+				q.AppendSelectAs(q.C(t.Field), t.As)
+			} else {
+				q.AppendSelect(q.C(t.Field))
+			}
+		case *sql.OrderExprTerm:
+			q.AppendSelectExprAs(t.Expr(q), t.As)
+		}
+	}
+}
+
+// OrderByNeighborTerms appends ordering based on the given terms of the neighbors.
+// For example, order users by the sum of likes on their posts.
+func OrderByNeighborTerms(q *sql.Selector, s *Step, opts ...sql.OrderTerm) {
+	var (
+		join  *sql.Selector
+		build = sql.Dialect(q.Dialect())
+	)
+	switch {
+	case s.FromEdgeOwner():
+		toT := build.Table(s.To.Table).Schema(s.To.Schema)
+		join = build.Select(toT.C(s.To.Column)).
+			From(toT)
+		selectTerms(join, opts)
+		q.LeftJoin(join).
+			On(q.C(s.Edge.Columns[0]), join.C(s.To.Column))
+	case s.ThroughEdgeTable():
+		pk1, pk2 := s.Edge.Columns[1], s.Edge.Columns[0]
+		if s.Edge.Inverse {
+			pk1, pk2 = pk2, pk1
+		}
+		toT := build.Table(s.To.Table).Schema(s.To.Schema)
+		joinT := build.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		join = build.Select(pk2).
+			From(toT).
+			Join(joinT).
+			On(toT.C(s.To.Column), joinT.C(pk1)).
+			GroupBy(pk2)
+		selectTerms(join, opts)
+		q.LeftJoin(join).
+			On(q.C(s.From.Column), join.C(pk2))
+	case s.ToEdgeOwner():
+		toT := build.Table(s.Edge.Table).Schema(s.Edge.Schema)
+		join = build.Select(toT.C(s.Edge.Columns[0])).
+			From(toT).
+			GroupBy(toT.C(s.Edge.Columns[0]))
+		selectTerms(join, opts)
+		q.LeftJoin(join).
+			On(q.C(s.From.Column), join.C(s.Edge.Columns[0]))
+	}
+	orderTerms(q, join, opts)
+}
+
+type (
+	// FieldSpec holds the information for updating a field
+	// column in the database.
+	FieldSpec struct {
+		Column string
+		Type   field.Type
+		Value  driver.Value // value to be stored.
+	}
+
+	// EdgeTarget holds the information for the target nodes
+	// of an edge.
+	EdgeTarget struct {
+		Nodes  []driver.Value
+		IDSpec *FieldSpec
+		// Additional fields can be set on the
+		// edge join table. Valid for M2M edges.
+		Fields []*FieldSpec
+	}
+
+	// EdgeSpec holds the information for creating or updating
+	// an edge in the database.
+	EdgeSpec struct {
+		Rel     Rel
+		Inverse bool
+		Table   string
+		Schema  string
+		Columns []string
+		Bidi    bool        // bidirectional edge.
+		Target  *EdgeTarget // target nodes.
+	}
+
+	// EdgeSpecs is used for performing common operations on a list of edges.
+	EdgeSpecs []*EdgeSpec
+
+	// NodeSpec defines the information for querying and
+	// decoding nodes in the graph.
+	NodeSpec struct {
+		Table       string
+		Schema      string
+		Columns     []string
+		ID          *FieldSpec   // primary key.
+		CompositeID []*FieldSpec // composite id (edge schema).
+	}
+)
+
+// NewFieldSpec creates a new FieldSpec with its required fields.
+func NewFieldSpec(column string, typ field.Type) *FieldSpec {
+	return &FieldSpec{Column: column, Type: typ}
+}
+
+// AddColumnOnce adds the given column to the spec if it is not already present.
+func (n *NodeSpec) AddColumnOnce(column string) *NodeSpec {
+	for _, c := range n.Columns {
+		if c == column {
+			return n
+		}
+	}
+	n.Columns = append(n.Columns, column)
+	return n
+}
+
+// FieldValues returns the values of additional fields that were set on the join-table.
+func (e *EdgeTarget) FieldValues() []any {
+	vs := make([]any, len(e.Fields))
+	for i, f := range e.Fields {
+		vs[i] = f.Value
+	}
+	return vs
+}
+
+type (
+	// CreateSpec holds the information for creating
+	// a node in the graph.
+	CreateSpec struct {
+		Table  string
+		Schema string
+		ID     *FieldSpec
+		Fields []*FieldSpec
+		Edges  []*EdgeSpec
+
+		// The OnConflict option allows providing on-conflict
+		// options to the INSERT statement.
+		//
+		//	sqlgraph.CreateSpec{
+		//		OnConflict: []sql.ConflictOption{
+		//			sql.ResolveWithNewValues(),
+		//		},
+		//	}
+		//
+		OnConflict []sql.ConflictOption
+	}
+
+	// BatchCreateSpec holds the information for creating
+	// multiple nodes in the graph.
+	BatchCreateSpec struct {
+		Nodes []*CreateSpec
+
+		// The OnConflict option allows providing on-conflict
+		// options to the INSERT statement.
+		//
+		//	sqlgraph.BatchCreateSpec{
+		//		OnConflict: []sql.ConflictOption{
+		//			sql.ResolveWithNewValues(),
+		//		},
+		//	}
+		//
+		OnConflict []sql.ConflictOption
+	}
+)
+
+// NewCreateSpec creates a new node creation spec.
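+//
+// A hypothetical sketch of building and applying a creation spec by hand
+// (generated code normally does this; ctx and drv are assumed to exist):
+//
+//	spec := NewCreateSpec("users", NewFieldSpec("id", field.TypeInt))
+//	spec.SetField("name", field.TypeString, "a8m")
+//	err := CreateNode(ctx, drv, spec)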
+func NewCreateSpec(table string, id *FieldSpec) *CreateSpec { + return &CreateSpec{Table: table, ID: id} +} + +// SetField appends a new field setter to the creation spec. +func (u *CreateSpec) SetField(column string, t field.Type, value driver.Value) { + u.Fields = append(u.Fields, &FieldSpec{ + Column: column, + Type: t, + Value: value, + }) +} + +// CreateNode applies the CreateSpec on the graph. The operation creates a new +// record in the database, and connects it to other nodes specified in spec.Edges. +func CreateNode(ctx context.Context, drv dialect.Driver, spec *CreateSpec) error { + gr := graph{tx: drv, builder: sql.Dialect(drv.Dialect())} + cr := &creator{CreateSpec: spec, graph: gr} + return cr.node(ctx, drv) +} + +// BatchCreate applies the BatchCreateSpec on the graph. +func BatchCreate(ctx context.Context, drv dialect.Driver, spec *BatchCreateSpec) error { + gr := graph{tx: drv, builder: sql.Dialect(drv.Dialect())} + cr := &batchCreator{BatchCreateSpec: spec, graph: gr} + return cr.nodes(ctx, drv) +} + +type ( + // EdgeMut defines edge mutations. + EdgeMut struct { + Add []*EdgeSpec + Clear []*EdgeSpec + } + + // FieldMut defines field mutations. + FieldMut struct { + Set []*FieldSpec // field = ? + Add []*FieldSpec // field = field + ? + Clear []*FieldSpec // field = NULL + } + + // UpdateSpec holds the information for updating one + // or more nodes in the graph. + UpdateSpec struct { + Node *NodeSpec + Edges EdgeMut + Fields FieldMut + Predicate func(*sql.Selector) + Modifiers []func(*sql.UpdateBuilder) + + ScanValues func(columns []string) ([]any, error) + Assign func(columns []string, values []any) error + } +) + +// NewUpdateSpec creates a new node update spec. +func NewUpdateSpec(table string, columns []string, id ...*FieldSpec) *UpdateSpec { + spec := &UpdateSpec{ + Node: &NodeSpec{Table: table, Columns: columns}, + } + switch { + case len(id) == 1: + spec.Node.ID = id[0] + case len(id) > 1: + spec.Node.CompositeID = id + } + return spec +} + +// AddModifier adds a new statement modifier to the spec. +func (u *UpdateSpec) AddModifier(m func(*sql.UpdateBuilder)) { + u.Modifiers = append(u.Modifiers, m) +} + +// AddModifiers adds a list of statement modifiers to the spec. +func (u *UpdateSpec) AddModifiers(m ...func(*sql.UpdateBuilder)) { + u.Modifiers = append(u.Modifiers, m...) +} + +// SetField appends a new field setter to the update spec. +func (u *UpdateSpec) SetField(column string, t field.Type, value driver.Value) { + u.Fields.Set = append(u.Fields.Set, &FieldSpec{ + Column: column, + Type: t, + Value: value, + }) +} + +// AddField appends a new field adder to the update spec. +func (u *UpdateSpec) AddField(column string, t field.Type, value driver.Value) { + u.Fields.Add = append(u.Fields.Add, &FieldSpec{ + Column: column, + Type: t, + Value: value, + }) +} + +// ClearField appends a new field cleaner (set to NULL) to the update spec. +func (u *UpdateSpec) ClearField(column string, t field.Type) { + u.Fields.Clear = append(u.Fields.Clear, &FieldSpec{ + Column: column, + Type: t, + }) +} + +// UpdateNode applies the UpdateSpec on one node in the graph. 
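+//
+// An illustrative sketch (ctx and drv are assumed to exist):
+//
+//	spec := NewUpdateSpec("users", []string{"id", "name"}, NewFieldSpec("id", field.TypeInt))
+//	spec.Node.ID.Value = 1
+//	spec.SetField("name", field.TypeString, "nati")
+//	err := UpdateNode(ctx, drv, spec)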
+func UpdateNode(ctx context.Context, drv dialect.Driver, spec *UpdateSpec) error {
+	tx, err := drv.Tx(ctx)
+	if err != nil {
+		return err
+	}
+	gr := graph{tx: tx, builder: sql.Dialect(drv.Dialect())}
+	cr := &updater{UpdateSpec: spec, graph: gr}
+	if err := cr.node(ctx, tx); err != nil {
+		return rollback(tx, err)
+	}
+	return tx.Commit()
+}
+
+// UpdateNodes applies the UpdateSpec on a set of nodes in the graph.
+func UpdateNodes(ctx context.Context, drv dialect.Driver, spec *UpdateSpec) (int, error) {
+	gr := graph{tx: drv, builder: sql.Dialect(drv.Dialect())}
+	cr := &updater{UpdateSpec: spec, graph: gr}
+	return cr.nodes(ctx, drv)
+}
+
+// NotFoundError is returned when trying to update an
+// entity that was not found in the database.
+type NotFoundError struct {
+	table string
+	id    driver.Value
+}
+
+func (e *NotFoundError) Error() string {
+	return fmt.Sprintf("record with id %v not found in table %s", e.id, e.table)
+}
+
+// DeleteSpec holds the information for deleting one
+// or more nodes in the graph.
+type DeleteSpec struct {
+	Node      *NodeSpec
+	Predicate func(*sql.Selector)
+}
+
+// NewDeleteSpec creates a new node deletion spec.
+func NewDeleteSpec(table string, id *FieldSpec) *DeleteSpec {
+	return &DeleteSpec{Node: &NodeSpec{Table: table, ID: id}}
+}
+
+// DeleteNodes applies the DeleteSpec on the graph.
+func DeleteNodes(ctx context.Context, drv dialect.Driver, spec *DeleteSpec) (int, error) {
+	var (
+		res     sql.Result
+		builder = sql.Dialect(drv.Dialect())
+	)
+	selector := builder.Select().
+		From(builder.Table(spec.Node.Table).Schema(spec.Node.Schema)).
+		WithContext(ctx)
+	if pred := spec.Predicate; pred != nil {
+		pred(selector)
+	}
+	query, args := builder.Delete(spec.Node.Table).Schema(spec.Node.Schema).FromSelect(selector).Query()
+	if err := drv.Exec(ctx, query, args, &res); err != nil {
+		return 0, err
+	}
+	affected, err := res.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	return int(affected), nil
+}
+
+// QuerySpec holds the information for querying
+// nodes in the graph.
+type QuerySpec struct {
+	Node *NodeSpec     // Nodes info.
+	From *sql.Selector // Optional query source (from path).
+
+	Limit     int
+	Offset    int
+	Unique    bool
+	Order     func(*sql.Selector)
+	Predicate func(*sql.Selector)
+	Modifiers []func(*sql.Selector)
+
+	ScanValues func(columns []string) ([]any, error)
+	Assign     func(columns []string, values []any) error
+}
+
+// NewQuerySpec creates a new node query spec.
+func NewQuerySpec(table string, columns []string, id *FieldSpec) *QuerySpec {
+	return &QuerySpec{
+		Node: &NodeSpec{
+			ID:      id,
+			Table:   table,
+			Columns: columns,
+		},
+	}
+}
+
+// QueryNodes queries the nodes in the graph query and scans them to the given values.
+func QueryNodes(ctx context.Context, drv dialect.Driver, spec *QuerySpec) error {
+	builder := sql.Dialect(drv.Dialect())
+	qr := &query{graph: graph{builder: builder}, QuerySpec: spec}
+	return qr.nodes(ctx, drv)
+}
+
+// CountNodes counts the nodes in the given graph query.
+func CountNodes(ctx context.Context, drv dialect.Driver, spec *QuerySpec) (int, error) {
+	builder := sql.Dialect(drv.Dialect())
+	qr := &query{graph: graph{builder: builder}, QuerySpec: spec}
+	return qr.count(ctx, drv)
+}
+
+// EdgeQuerySpec holds the information for querying
+// edges in the graph.
+type EdgeQuerySpec struct {
+	Edge       *EdgeSpec
+	Predicate  func(*sql.Selector)
+	ScanValues func() [2]any
+	Assign     func(out, in any) error
+}
+
+// QueryEdges queries the edges in the graph and scans the results with the given ScanValues and Assign functions.
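+//
+// An illustrative sketch for scanning an M2M join-table (names are made up):
+//
+//	var pairs [][2]int64
+//	err := QueryEdges(ctx, drv, &EdgeQuerySpec{
+//		Edge: &EdgeSpec{Table: "user_groups", Columns: []string{"user_id", "group_id"}},
+//		ScanValues: func() [2]any {
+//			return [2]any{new(int64), new(int64)}
+//		},
+//		Assign: func(out, in any) error {
+//			pairs = append(pairs, [2]int64{*out.(*int64), *in.(*int64)})
+//			return nil
+//		},
+//	})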
+func QueryEdges(ctx context.Context, drv dialect.Driver, spec *EdgeQuerySpec) error { + if len(spec.Edge.Columns) != 2 { + return fmt.Errorf("sqlgraph: edge query requires 2 columns (out, in)") + } + out, in := spec.Edge.Columns[0], spec.Edge.Columns[1] + if spec.Edge.Inverse { + out, in = in, out + } + selector := sql.Dialect(drv.Dialect()). + Select(out, in). + From(sql.Table(spec.Edge.Table).Schema(spec.Edge.Schema)) + if p := spec.Predicate; p != nil { + p(selector) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := drv.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + for rows.Next() { + values := spec.ScanValues() + if err := rows.Scan(values[0], values[1]); err != nil { + return err + } + if err := spec.Assign(values[0], values[1]); err != nil { + return err + } + } + return rows.Err() +} + +type query struct { + graph + *QuerySpec +} + +func (q *query) nodes(ctx context.Context, drv dialect.Driver) error { + rows := &sql.Rows{} + selector, err := q.selector(ctx) + if err != nil { + return err + } + query, args := selector.Query() + if err := drv.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + columns, err := rows.Columns() + if err != nil { + return err + } + for rows.Next() { + values, err := q.ScanValues(columns) + if err != nil { + return err + } + for i, v := range values { + if _, ok := v.(*sql.UnknownType); ok { + values[i] = sql.ScanTypeOf(rows, i) + } + } + if err := rows.Scan(values...); err != nil { + return err + } + if err := q.Assign(columns, values); err != nil { + return err + } + } + return rows.Err() +} + +func (q *query) count(ctx context.Context, drv dialect.Driver) (int, error) { + rows := &sql.Rows{} + selector, err := q.selector(ctx) + if err != nil { + return 0, err + } + // Remove any ORDER BY clauses present in the COUNT query as + // they are not allowed in some databases, such as PostgreSQL. + if q.Order != nil { + selector.ClearOrder() + } + // If no columns were selected in count, + // the default selection is by node ids. + columns := q.Node.Columns + if len(columns) == 0 && q.Node.ID != nil { + columns = append(columns, q.Node.ID.Column) + } + for i, c := range columns { + columns[i] = selector.C(c) + } + if q.Unique { + selector.SetDistinct(false) + selector.Count(sql.Distinct(columns...)) + } else { + selector.Count(columns...) + } + query, args := selector.Query() + if err := drv.Query(ctx, query, args, rows); err != nil { + return 0, err + } + defer rows.Close() + return sql.ScanInt(rows) +} + +func (q *query) selector(ctx context.Context) (*sql.Selector, error) { + selector := q.builder. + Select(). + From(q.builder.Table(q.Node.Table).Schema(q.Node.Schema)). + WithContext(ctx) + if q.From != nil { + selector = q.From + } + selector.Select(selector.Columns(q.Node.Columns...)...) + if pred := q.Predicate; pred != nil { + pred(selector) + } + if order := q.Order; order != nil { + order(selector) + } + if q.Offset != 0 { + // Limit is mandatory for the offset clause. We start + // with default value, and override it below if needed. 
+		selector.Offset(q.Offset).Limit(math.MaxInt32)
+	}
+	if q.Limit != 0 {
+		selector.Limit(q.Limit)
+	}
+	if q.Unique {
+		selector.Distinct()
+	}
+	for _, m := range q.Modifiers {
+		m(selector)
+	}
+	if err := selector.Err(); err != nil {
+		return nil, err
+	}
+	return selector, nil
+}
+
+type updater struct {
+	graph
+	*UpdateSpec
+}
+
+func (u *updater) node(ctx context.Context, tx dialect.ExecQuerier) error {
+	var (
+		id         driver.Value
+		idp        *sql.Predicate
+		addEdges   = EdgeSpecs(u.Edges.Add).GroupRel()
+		clearEdges = EdgeSpecs(u.Edges.Clear).GroupRel()
+	)
+	switch {
+	// In case it is not an edge schema, the id holds the PK
+	// of the node used for linking it with the other nodes.
+	case u.Node.ID != nil:
+		id = u.Node.ID.Value
+		idp = sql.EQ(u.Node.ID.Column, id)
+	case len(u.Node.CompositeID) == 2:
+		idp = sql.And(
+			sql.EQ(u.Node.CompositeID[0].Column, u.Node.CompositeID[0].Value),
+			sql.EQ(u.Node.CompositeID[1].Column, u.Node.CompositeID[1].Value),
+		)
+	case len(u.Node.CompositeID) != 2:
+		return fmt.Errorf("sql/sqlgraph: invalid composite id for update table %q", u.Node.Table)
+	default:
+		return fmt.Errorf("sql/sqlgraph: missing node id for update table %q", u.Node.Table)
+	}
+	update := u.builder.Update(u.Node.Table).Schema(u.Node.Schema).Where(idp)
+	if pred := u.Predicate; pred != nil {
+		selector := u.builder.Select().From(u.builder.Table(u.Node.Table).Schema(u.Node.Schema))
+		pred(selector)
+		update.FromSelect(selector)
+	}
+	if err := u.setTableColumns(update, addEdges, clearEdges); err != nil {
+		return err
+	}
+	for _, m := range u.Modifiers {
+		m(update)
+	}
+	if err := update.Err(); err != nil {
+		return err
+	}
+	if !update.Empty() {
+		var res sql.Result
+		query, args := update.Query()
+		if err := tx.Exec(ctx, query, args, &res); err != nil {
+			return err
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return err
+		}
+		// In case there are zero affected rows by this statement, we need to distinguish
+		// between the case of "record was not found" and "record was not changed".
+		if affected == 0 && u.Predicate != nil {
+			if err := u.ensureExists(ctx); err != nil {
+				return err
+			}
+		}
+	}
+	if id != nil {
+		// Not an edge schema.
+		if err := u.setExternalEdges(ctx, []driver.Value{id}, addEdges, clearEdges); err != nil {
+			return err
+		}
+	}
+	// Ignore querying the database when there's nothing
+	// to scan into it.
+	if u.ScanValues == nil {
+		return nil
+	}
+	selector := u.builder.Select(u.Node.Columns...).
+		From(u.builder.Table(u.Node.Table).Schema(u.Node.Schema)).
+		// Skip adding the custom predicates that were attached
+		// to the updater as they may point to columns that were
+		// changed by the UPDATE statement.
+		Where(idp)
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := tx.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	return u.scan(rows)
+}
+
+func (u *updater) nodes(ctx context.Context, drv dialect.Driver) (int, error) {
+	var (
+		addEdges   = EdgeSpecs(u.Edges.Add).GroupRel()
+		clearEdges = EdgeSpecs(u.Edges.Clear).GroupRel()
+		multiple   = hasExternalEdges(addEdges, clearEdges)
+		update     = u.builder.Update(u.Node.Table).Schema(u.Node.Schema)
+		selector   = u.builder.Select().
+				From(u.builder.Table(u.Node.Table).Schema(u.Node.Schema)).
+				WithContext(ctx)
+	)
+	switch {
+	// In case it is not an edge schema, the PKs of
+	// the returned nodes are used for updating the external tables.
+ case u.Node.ID != nil: + selector.Select(u.Node.ID.Column) + case len(u.Node.CompositeID) == 2: + // Other edge-schemas (M2M tables) cannot be updated by this operation. + // Also, in case there is a need to update an external foreign-key, it must + // be a single value and the user should use the "update by id" API instead. + if multiple { + return 0, fmt.Errorf("sql/sqlgraph: update edge schema table %q cannot update external tables", u.Node.Table) + } + case len(u.Node.CompositeID) != 2: + return 0, fmt.Errorf("sql/sqlgraph: invalid composite id for update table %q", u.Node.Table) + default: + return 0, fmt.Errorf("sql/sqlgraph: missing node id for update table %q", u.Node.Table) + } + if err := u.setTableColumns(update, addEdges, clearEdges); err != nil { + return 0, err + } + if pred := u.Predicate; pred != nil { + pred(selector) + } + // In case of single statement update, avoid opening a transaction manually. + if !multiple { + update.FromSelect(selector) + return u.updateTable(ctx, update) + } + tx, err := drv.Tx(ctx) + if err != nil { + return 0, err + } + u.tx = tx + affected, err := func() (int, error) { + var ( + ids []driver.Value + rows = &sql.Rows{} + query, args = selector.Query() + ) + if err := u.tx.Query(ctx, query, args, rows); err != nil { + return 0, fmt.Errorf("querying table %s: %w", u.Node.Table, err) + } + defer rows.Close() + if err := sql.ScanSlice(rows, &ids); err != nil { + return 0, fmt.Errorf("scan node ids: %w", err) + } + if err := rows.Close(); err != nil { + return 0, err + } + if len(ids) == 0 { + return 0, nil + } + update.Where(matchID(u.Node.ID.Column, ids)) + // In case of multi statement update, that change can + // affect more than 1 table, and therefore, we return + // the list of ids as number of affected records. + if _, err := u.updateTable(ctx, update); err != nil { + return 0, err + } + if err := u.setExternalEdges(ctx, ids, addEdges, clearEdges); err != nil { + return 0, err + } + return len(ids), nil + }() + if err != nil { + return 0, rollback(tx, err) + } + return affected, tx.Commit() +} + +func (u *updater) updateTable(ctx context.Context, stmt *sql.UpdateBuilder) (int, error) { + for _, m := range u.Modifiers { + m(stmt) + } + if err := stmt.Err(); err != nil { + return 0, err + } + if stmt.Empty() { + return 0, nil + } + var ( + res sql.Result + query, args = stmt.Query() + ) + if err := u.tx.Exec(ctx, query, args, &res); err != nil { + return 0, err + } + affected, err := res.RowsAffected() + if err != nil { + return 0, err + } + return int(affected), nil +} + +func (u *updater) setExternalEdges(ctx context.Context, ids []driver.Value, addEdges, clearEdges map[Rel][]*EdgeSpec) error { + if err := u.graph.clearM2MEdges(ctx, ids, clearEdges[M2M]); err != nil { + return err + } + if err := u.graph.addM2MEdges(ctx, ids, addEdges[M2M]); err != nil { + return err + } + if err := u.graph.clearFKEdges(ctx, ids, append(clearEdges[O2M], clearEdges[O2O]...)); err != nil { + return err + } + if err := u.graph.addFKEdges(ctx, ids, append(addEdges[O2M], addEdges[O2O]...)); err != nil { + return err + } + return nil +} + +// setTableColumns sets the table columns and foreign_keys used in insert. +func (u *updater) setTableColumns(update *sql.UpdateBuilder, addEdges, clearEdges map[Rel][]*EdgeSpec) error { + // Avoid multiple assignments to the same column. 
+ setEdges := make(map[string]bool) + for _, e := range addEdges[M2O] { + setEdges[e.Columns[0]] = true + } + for _, e := range addEdges[O2O] { + if e.Inverse || e.Bidi { + setEdges[e.Columns[0]] = true + } + } + for _, fi := range u.Fields.Clear { + update.SetNull(fi.Column) + } + for _, e := range clearEdges[M2O] { + if col := e.Columns[0]; !setEdges[col] { + update.SetNull(col) + } + } + for _, e := range clearEdges[O2O] { + col := e.Columns[0] + if (e.Inverse || e.Bidi) && !setEdges[col] { + update.SetNull(col) + } + } + err := setTableColumns(u.Fields.Set, addEdges, func(column string, value driver.Value) { + update.Set(column, value) + }) + if err != nil { + return err + } + for _, fi := range u.Fields.Add { + update.Add(fi.Column, fi.Value) + } + return nil +} + +func (u *updater) scan(rows *sql.Rows) error { + defer rows.Close() + columns, err := rows.Columns() + if err != nil { + return err + } + if !rows.Next() { + if err := rows.Err(); err != nil { + return err + } + if len(u.Node.CompositeID) == 2 { + return &NotFoundError{table: u.Node.Table, id: []driver.Value{u.Node.CompositeID[0].Value, u.Node.CompositeID[1].Value}} + } + return &NotFoundError{table: u.Node.Table, id: u.Node.ID.Value} + } + values, err := u.ScanValues(columns) + if err != nil { + return err + } + for i, v := range values { + if _, ok := v.(*sql.UnknownType); ok { + values[i] = sql.ScanTypeOf(rows, i) + } + } + if err := rows.Scan(values...); err != nil { + return fmt.Errorf("failed scanning rows: %w", err) + } + if err := u.Assign(columns, values); err != nil { + return err + } + return nil +} + +func (u *updater) ensureExists(ctx context.Context) error { + exists := u.builder.Select().From(u.builder.Table(u.Node.Table).Schema(u.Node.Schema)).Where(sql.EQ(u.Node.ID.Column, u.Node.ID.Value)) + u.Predicate(exists) + query, args := u.builder.SelectExpr(sql.Exists(exists)).Query() + rows := &sql.Rows{} + if err := u.tx.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + found, err := sql.ScanBool(rows) + if err != nil { + return err + } + if !found { + return &NotFoundError{table: u.Node.Table, id: u.Node.ID.Value} + } + return nil +} + +type creator struct { + graph + *CreateSpec +} + +func (c *creator) node(ctx context.Context, drv dialect.Driver) error { + var ( + edges = EdgeSpecs(c.Edges).GroupRel() + insert = c.builder.Insert(c.Table).Schema(c.Schema).Default() + ) + if err := c.setTableColumns(insert, edges); err != nil { + return err + } + tx, err := c.mayTx(ctx, drv, edges) + if err != nil { + return err + } + if err := func() error { + // In case the spec does not contain an ID field, we assume + // we interact with an edge-schema with composite primary key. + if c.ID == nil { + c.ensureConflict(insert) + query, args, err := insert.QueryErr() + if err != nil { + return err + } + return c.tx.Exec(ctx, query, args, nil) + } + if err := c.insert(ctx, insert); err != nil { + return err + } + if err := c.graph.addM2MEdges(ctx, []driver.Value{c.ID.Value}, edges[M2M]); err != nil { + return err + } + return c.graph.addFKEdges(ctx, []driver.Value{c.ID.Value}, append(edges[O2M], edges[O2O]...)) + }(); err != nil { + return rollback(tx, err) + } + return tx.Commit() +} + +// mayTx opens a new transaction if the create operation spans across multiple statements. 
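+// For example (illustrative), adding M2M edges requires extra INSERTs into
+// the join-table besides the node INSERT, so the creation runs in a transaction.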
+func (c *creator) mayTx(ctx context.Context, drv dialect.Driver, edges map[Rel][]*EdgeSpec) (dialect.Tx, error) {
+	if !hasExternalEdges(edges, nil) {
+		return dialect.NopTx(drv), nil
+	}
+	tx, err := drv.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.tx = tx
+	return tx, nil
+}
+
+// setTableColumns sets the table columns and foreign_keys used in insert.
+func (c *creator) setTableColumns(insert *sql.InsertBuilder, edges map[Rel][]*EdgeSpec) error {
+	err := setTableColumns(c.Fields, edges, func(column string, value driver.Value) {
+		insert.Set(column, value)
+	})
+	return err
+}
+
+// insert inserts the node into its table and sets its ID if it was not provided by the user.
+func (c *creator) insert(ctx context.Context, insert *sql.InsertBuilder) error {
+	c.ensureConflict(insert)
+	// If the id field was provided by the user.
+	if c.ID.Value != nil {
+		insert.Set(c.ID.Column, c.ID.Value)
+		// In case of "ON CONFLICT", the record may exist in the
+		// database, and we need to get back the database id field.
+		if len(c.CreateSpec.OnConflict) == 0 {
+			query, args, err := insert.QueryErr()
+			if err != nil {
+				return err
+			}
+			return c.tx.Exec(ctx, query, args, nil)
+		}
+	}
+	return c.insertLastID(ctx, insert.Returning(c.ID.Column))
+}
+
+// ensureConflict ensures the ON CONFLICT is added to the insert statement.
+func (c *creator) ensureConflict(insert *sql.InsertBuilder) {
+	if opts := c.CreateSpec.OnConflict; len(opts) > 0 {
+		insert.OnConflict(opts...)
+		c.ensureLastInsertID(insert)
+	}
+}
+
+// ensureLastInsertID ensures the LAST_INSERT_ID was added to the
+// 'ON DUPLICATE ... UPDATE' clause if it was not provided.
+func (c *creator) ensureLastInsertID(insert *sql.InsertBuilder) {
+	if c.ID == nil || !c.ID.Type.Numeric() || c.ID.Value != nil || insert.Dialect() != dialect.MySQL {
+		return
+	}
+	insert.OnConflict(sql.ResolveWith(func(s *sql.UpdateSet) {
+		for _, column := range s.UpdateColumns() {
+			if column == c.ID.Column {
+				return
+			}
+		}
+		s.Set(c.ID.Column, sql.Expr(fmt.Sprintf("LAST_INSERT_ID(%s)", s.Table().C(c.ID.Column))))
+	}))
+}
+
+type batchCreator struct {
+	graph
+	*BatchCreateSpec
+}
+
+func (c *batchCreator) nodes(ctx context.Context, drv dialect.Driver) error {
+	if len(c.Nodes) == 0 {
+		return nil
+	}
+	columns := make(map[string]struct{})
+	values := make([]map[string]driver.Value, len(c.Nodes))
+	for i, node := range c.Nodes {
+		if i > 0 && node.Table != c.Nodes[i-1].Table {
+			return fmt.Errorf("more than 1 table for batch insert: %q != %q", node.Table, c.Nodes[i-1].Table)
+		}
+		values[i] = make(map[string]driver.Value)
+		if node.ID != nil && node.ID.Value != nil {
+			columns[node.ID.Column] = struct{}{}
+			values[i][node.ID.Column] = node.ID.Value
+		}
+		edges := EdgeSpecs(node.Edges).GroupRel()
+		err := setTableColumns(node.Fields, edges, func(column string, value driver.Value) {
+			columns[column] = struct{}{}
+			values[i][column] = value
+		})
+		if err != nil {
+			return err
+		}
+	}
+	for column := range columns {
+		for i := range values {
+			if _, exists := values[i][column]; !exists {
+				if c.Nodes[i].ID != nil && column == c.Nodes[i].ID.Column {
+					// If the ID value was provided to one of the nodes, it should be
+					// provided to all others because this affects the way we calculate
+					// their values in MySQL and SQLite dialects.
+					return fmt.Errorf("inconsistent id values for batch insert")
+				}
+				// Assign NULL values for empty placeholders.
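+				// This keeps every row aligned with the unified column
+				// list built above: each tuple gets a value (possibly
+				// NULL) for every column.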
+ values[i][column] = nil + } + } + } + sorted := keys(columns) + insert := c.builder.Insert(c.Nodes[0].Table).Schema(c.Nodes[0].Schema).Default().Columns(sorted...) + for i := range values { + vs := make([]any, len(sorted)) + for j, c := range sorted { + vs[j] = values[i][c] + } + insert.Values(vs...) + } + tx, err := c.mayTx(ctx, drv) + if err != nil { + return err + } + c.tx = tx + if err := func() error { + // In case the spec does not contain an ID field, we assume + // we interact with an edge-schema with composite primary key. + if c.Nodes[0].ID == nil { + c.ensureConflict(insert) + query, args := insert.Query() + return tx.Exec(ctx, query, args, nil) + } + if err := c.batchInsert(ctx, tx, insert); err != nil { + return fmt.Errorf("insert nodes to table %q: %w", c.Nodes[0].Table, err) + } + if err := c.batchAddM2M(ctx, c.BatchCreateSpec); err != nil { + return err + } + // FKs that exist in different tables can't be updated in batch (using the CASE + // statement), because we rely on RowsAffected to check if the FK column is NULL. + for _, node := range c.Nodes { + edges := EdgeSpecs(node.Edges).GroupRel() + if err := c.graph.addFKEdges(ctx, []driver.Value{node.ID.Value}, append(edges[O2M], edges[O2O]...)); err != nil { + return err + } + } + return nil + }(); err != nil { + return rollback(tx, err) + } + return tx.Commit() +} + +// mayTx opens a new transaction if the create operation spans across multiple statements. +func (c *batchCreator) mayTx(ctx context.Context, drv dialect.Driver) (dialect.Tx, error) { + for _, node := range c.Nodes { + for _, edge := range node.Edges { + if isExternalEdge(edge) { + return drv.Tx(ctx) + } + } + } + return dialect.NopTx(drv), nil +} + +// batchInsert inserts a batch of nodes to their table and sets their ID if it was not provided by the user. +func (c *batchCreator) batchInsert(ctx context.Context, tx dialect.ExecQuerier, insert *sql.InsertBuilder) error { + c.ensureConflict(insert) + return c.insertLastIDs(ctx, tx, insert.Returning(c.Nodes[0].ID.Column)) +} + +// ensureConflict ensures the ON CONFLICT is added to the insert statement. +func (c *batchCreator) ensureConflict(insert *sql.InsertBuilder) { + if opts := c.BatchCreateSpec.OnConflict; len(opts) > 0 { + insert.OnConflict(opts...) + } +} + +// GroupRel groups edges by their relation type. +func (es EdgeSpecs) GroupRel() map[Rel][]*EdgeSpec { + edges := make(map[Rel][]*EdgeSpec) + for _, edge := range es { + edges[edge.Rel] = append(edges[edge.Rel], edge) + } + return edges +} + +// GroupTable groups edges by their table name. +func (es EdgeSpecs) GroupTable() map[string][]*EdgeSpec { + edges := make(map[string][]*EdgeSpec) + for _, edge := range es { + edges[edge.Table] = append(edges[edge.Table], edge) + } + return edges +} + +// FilterRel returns edges for the given relation type. +func (es EdgeSpecs) FilterRel(r Rel) EdgeSpecs { + edges := make([]*EdgeSpec, 0, len(es)) + for _, edge := range es { + if edge.Rel == r { + edges = append(edges, edge) + } + } + return edges +} + +// The common operations shared between the different builders. +// +// M2M edges reside in join tables and require INSERT and DELETE +// queries for adding or removing edges respectively. +// +// O2M and non-inverse O2O edges also reside in external tables, +// but use UPDATE queries (fk = ?, fk = NULL). 
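+//
+// For illustration (with hypothetical tables): adding an M2M edge issues
+// INSERT INTO "user_groups" ("user_id", "group_id") VALUES (?, ?), while
+// clearing an O2M edge issues UPDATE "pets" SET "owner_id" = NULL WHERE ...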
+type graph struct { + tx dialect.ExecQuerier + builder *sql.DialectBuilder +} + +func (g *graph) clearM2MEdges(ctx context.Context, ids []driver.Value, edges EdgeSpecs) error { + // Remove all M2M edges from the same type at once. + // The EdgeSpec is the same for all members in a group. + tables := edges.GroupTable() + for _, table := range edgeKeys(tables) { + edges := tables[table] + preds := make([]*sql.Predicate, 0, len(edges)) + for _, edge := range edges { + fromC, toC := edge.Columns[0], edge.Columns[1] + if edge.Inverse { + fromC, toC = toC, fromC + } + // If there are no specific edges (to target-nodes) to remove, + // clear all edges that go out (or come in) from the nodes. + if len(edge.Target.Nodes) == 0 { + preds = append(preds, matchID(fromC, ids)) + if edge.Bidi { + preds = append(preds, matchID(toC, ids)) + } + } else { + pk1, pk2 := ids, edge.Target.Nodes + preds = append(preds, matchIDs(fromC, pk1, toC, pk2)) + if edge.Bidi { + preds = append(preds, matchIDs(toC, pk1, fromC, pk2)) + } + } + } + deleter := g.builder.Delete(table).Where(sql.Or(preds...)) + if edges[0].Schema != "" { + // If the Schema field was provided to the EdgeSpec (by the + // generated code), it should be the same for all EdgeSpecs. + deleter.Schema(edges[0].Schema) + } + query, args := deleter.Query() + if err := g.tx.Exec(ctx, query, args, nil); err != nil { + return fmt.Errorf("remove m2m edge for table %s: %w", table, err) + } + } + return nil +} + +func (g *graph) addM2MEdges(ctx context.Context, ids []driver.Value, edges EdgeSpecs) error { + // Insert all M2M edges from the same type at once. + // The EdgeSpec is the same for all members in a group. + tables := edges.GroupTable() + for _, table := range edgeKeys(tables) { + var ( + edges = tables[table] + columns = edges[0].Columns + values = make([]any, 0, len(edges[0].Target.Fields)) + ) + // Additional fields, such as edge-schema fields. Note, we use the first index, + // because Ent generates the same spec fields for all edges from the same type. + for _, f := range edges[0].Target.Fields { + values = append(values, f.Value) + columns = append(columns, f.Column) + } + insert := g.builder.Insert(table).Columns(columns...) + if edges[0].Schema != "" { + // If the Schema field was provided to the EdgeSpec (by the + // generated code), it should be the same for all EdgeSpecs. + insert.Schema(edges[0].Schema) + } + for _, edge := range edges { + pk1, pk2 := ids, edge.Target.Nodes + if edge.Inverse { + pk1, pk2 = pk2, pk1 + } + for _, pair := range product(pk1, pk2) { + insert.Values(append([]any{pair[0], pair[1]}, values...)...) + if edge.Bidi { + insert.Values(append([]any{pair[1], pair[0]}, values...)...) + } + } + } + // Ignore conflicts only if edges do not contain extra fields, because these fields + // can hold different values on different insertions (e.g. time.Now() or uuid.New()). 
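+		// With ON CONFLICT DO NOTHING, re-linking an existing (from, to)
+		// pair is skipped silently instead of failing the whole statement.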
+		if len(edges[0].Target.Fields) == 0 {
+			insert.OnConflict(sql.DoNothing())
+		}
+		query, args := insert.Query()
+		if err := g.tx.Exec(ctx, query, args, nil); err != nil {
+			return fmt.Errorf("add m2m edge for table %s: %w", table, err)
+		}
+	}
+	return nil
+}
+
+func (g *graph) batchAddM2M(ctx context.Context, spec *BatchCreateSpec) error {
+	tables := make(map[string]*sql.InsertBuilder)
+	for _, node := range spec.Nodes {
+		edges := EdgeSpecs(node.Edges).FilterRel(M2M)
+		for name, edges := range edges.GroupTable() {
+			if len(edges) != 1 {
+				return fmt.Errorf("expect exactly 1 edge-spec per table, but got %d", len(edges))
+			}
+			edge := edges[0]
+			insert, ok := tables[name]
+			if !ok {
+				columns := edge.Columns
+				// Additional fields, such as edge-schema fields.
+				for _, f := range edge.Target.Fields {
+					columns = append(columns, f.Column)
+				}
+				insert = g.builder.Insert(name).Columns(columns...)
+				if edge.Schema != "" {
+					// If the Schema field was provided to the EdgeSpec (by the
+					// generated code), it should be the same for all EdgeSpecs.
+					insert.Schema(edge.Schema)
+				}
+				// Ignore conflicts only if edges do not contain extra fields, because these fields
+				// can hold different values on different insertions (e.g. time.Now() or uuid.New()).
+				if len(edge.Target.Fields) == 0 {
+					insert.OnConflict(sql.DoNothing())
+				}
+			}
+			tables[name] = insert
+			pk1, pk2 := []driver.Value{node.ID.Value}, edge.Target.Nodes
+			if edge.Inverse {
+				pk1, pk2 = pk2, pk1
+			}
+			for _, pair := range product(pk1, pk2) {
+				insert.Values(append([]any{pair[0], pair[1]}, edge.Target.FieldValues()...)...)
+				if edge.Bidi {
+					insert.Values(append([]any{pair[1], pair[0]}, edge.Target.FieldValues()...)...)
+				}
+			}
+		}
+	}
+	for _, table := range insertKeys(tables) {
+		query, args := tables[table].Query()
+		if err := g.tx.Exec(ctx, query, args, nil); err != nil {
+			return fmt.Errorf("add m2m edge for table %s: %w", table, err)
+		}
+	}
+	return nil
+}
+
+func (g *graph) clearFKEdges(ctx context.Context, ids []driver.Value, edges []*EdgeSpec) error {
+	for _, edge := range edges {
+		if edge.Rel == O2O && edge.Inverse {
+			continue
+		}
+		// O2O relations can be cleared without
+		// passing the target ids.
+		pred := matchID(edge.Columns[0], ids)
+		if nodes := edge.Target.Nodes; len(nodes) > 0 {
+			pred = matchIDs(edge.Target.IDSpec.Column, edge.Target.Nodes, edge.Columns[0], ids)
+		}
+		query, args := g.builder.Update(edge.Table).
+			SetNull(edge.Columns[0]).
+			Where(pred).
+			Query()
+		if err := g.tx.Exec(ctx, query, args, nil); err != nil {
+			return fmt.Errorf("clear %s edge for table %s: %w", edge.Rel, edge.Table, err)
+		}
+	}
+	return nil
+}
+
+func (g *graph) addFKEdges(ctx context.Context, ids []driver.Value, edges []*EdgeSpec) error {
+	id := ids[0]
+	if len(ids) > 1 && len(edges) != 0 {
+		// O2M and non-inverse O2O edges are defined by a FK in the "other"
+		// table. Therefore, ids[i+1] will override ids[i] which is invalid.
+		return fmt.Errorf("unable to link FK edge to more than 1 node: %v", ids)
+	}
+	for _, edge := range edges {
+		if edge.Rel == O2O && edge.Inverse {
+			continue
+		}
+		p := sql.EQ(edge.Target.IDSpec.Column, edge.Target.Nodes[0])
+		// Use an "IN" predicate instead of a list of "OR"s
+		// when there is more than one node to connect.
+		if len(edge.Target.Nodes) > 1 {
+			p = sql.InValues(edge.Target.IDSpec.Column, edge.Target.Nodes...)
+		}
+		query, args := g.builder.Update(edge.Table).
+			Schema(edge.Schema).
+			Set(edge.Columns[0], id).
+			Where(sql.And(p, sql.IsNull(edge.Columns[0]))).
+			Query()
+		var res sql.Result
+		if err := g.tx.Exec(ctx, query, args, &res); err != nil {
+			return fmt.Errorf("add %s edge for table %s: %w", edge.Rel, edge.Table, err)
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return err
+		}
+		// Setting the FK value of the "other" table without clearing it first is not
+		// allowed, including a no-op assignment (same id), because we rely on
+		// "affected" to determine whether the FK was actually set.
+		if ids := edge.Target.Nodes; int(affected) < len(ids) {
+			return &ConstraintError{msg: fmt.Sprintf("one of %v is already connected to a different %s", ids, edge.Columns[0])}
+		}
+	}
+	return nil
+}
+
+func hasExternalEdges(addEdges, clearEdges map[Rel][]*EdgeSpec) bool {
+	// M2M edges reside in a join-table, and O2M edges reside
+	// in the M2O table (the entity that holds the FK).
+	if len(clearEdges[M2M]) > 0 || len(addEdges[M2M]) > 0 ||
+		len(clearEdges[O2M]) > 0 || len(addEdges[O2M]) > 0 {
+		return true
+	}
+	for _, edges := range [][]*EdgeSpec{clearEdges[O2O], addEdges[O2O]} {
+		for _, e := range edges {
+			if !e.Inverse {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// isExternalEdge reports whether the given edge requires an UPDATE
+// or an INSERT on another table.
+func isExternalEdge(e *EdgeSpec) bool {
+	return e.Rel == M2M || e.Rel == O2M || e.Rel == O2O && !e.Inverse
+}
+
+// setTableColumns is shared between updater and creator.
+func setTableColumns(fields []*FieldSpec, edges map[Rel][]*EdgeSpec, set func(string, driver.Value)) (err error) {
+	for _, fi := range fields {
+		value := fi.Value
+		if fi.Type == field.TypeJSON {
+			buf, err := json.Marshal(value)
+			if err != nil {
+				return fmt.Errorf("marshal value for column %s: %w", fi.Column, err)
+			}
+			// If the underlying driver does not support JSON types,
+			// driver.DefaultParameterConverter will convert it to []uint8.
+			value = json.RawMessage(buf)
+		}
+		set(fi.Column, value)
+	}
+	for _, e := range edges[M2O] {
+		set(e.Columns[0], e.Target.Nodes[0])
+	}
+	for _, e := range edges[O2O] {
+		if e.Inverse || e.Bidi {
+			set(e.Columns[0], e.Target.Nodes[0])
+		}
+	}
+	return nil
+}
+
+// insertLastID invokes the insert query on the transaction and returns the LastInsertID.
+func (c *creator) insertLastID(ctx context.Context, insert *sql.InsertBuilder) error {
+	query, args, err := insert.QueryErr()
+	if err != nil {
+		return err
+	}
+	// MySQL does not support the "RETURNING" clause.
+	if insert.Dialect() != dialect.MySQL {
+		rows := &sql.Rows{}
+		if err := c.tx.Query(ctx, query, args, rows); err != nil {
+			return err
+		}
+		defer rows.Close()
+		switch _, ok := c.ID.Value.(field.ValueScanner); {
+		case ok:
+			// If the ID implements the sql.Scanner
+			// interface it should be a pointer type.
+			return sql.ScanOne(rows, c.ID.Value)
+		case c.ID.Type.Numeric():
+			// Normalize the type to int64 to make it
+			// look like LastInsertId.
+			id, err := sql.ScanInt64(rows)
+			if err != nil {
+				return err
+			}
+			c.ID.Value = id
+			return nil
+		default:
+			return sql.ScanOne(rows, &c.ID.Value)
+		}
+	}
+	// MySQL.
+	var res sql.Result
+	if err := c.tx.Exec(ctx, query, args, &res); err != nil {
+		return err
+	}
+	// If the ID field is not numeric (e.g. string),
+	// there is no way to scan the LAST_INSERT_ID.
+	if c.ID.Type.Numeric() {
+		id, err := res.LastInsertId()
+		if err != nil {
+			return err
+		}
+		c.ID.Value = id
+	}
+	return nil
+}
+
+// insertLastIDs invokes the batch insert query on the transaction and returns the LastInsertID of all entities.
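+// For non-MySQL dialects the IDs are read back via the RETURNING clause;
+// for MySQL they are derived from LAST_INSERT_ID() and the affected-rows count.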
+func (c *batchCreator) insertLastIDs(ctx context.Context, tx dialect.ExecQuerier, insert *sql.InsertBuilder) error {
+	query, args, err := insert.QueryErr()
+	if err != nil {
+		return err
+	}
+	// MySQL does not support the "RETURNING" clause.
+	if insert.Dialect() != dialect.MySQL {
+		rows := &sql.Rows{}
+		if err := tx.Query(ctx, query, args, rows); err != nil {
+			return err
+		}
+		defer rows.Close()
+		for i := 0; rows.Next(); i++ {
+			node := c.Nodes[i]
+			if node.ID.Type.Numeric() {
+				// Normalize the type to int64 to make it
+				// look like LastInsertId.
+				var id int64
+				if err := rows.Scan(&id); err != nil {
+					return err
+				}
+				node.ID.Value = id
+			} else if err := rows.Scan(&node.ID.Value); err != nil {
+				return err
+			}
+		}
+		return rows.Err()
+	}
+	// MySQL.
+	var res sql.Result
+	if err := tx.Exec(ctx, query, args, &res); err != nil {
+		return err
+	}
+	// If the ID field is not numeric (e.g. string),
+	// there is no way to scan the LAST_INSERT_ID.
+	if len(c.Nodes) > 0 && c.Nodes[0].ID.Type.Numeric() {
+		id, err := res.LastInsertId()
+		if err != nil {
+			return err
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return err
+		}
+		// Assume the ID field is AUTO_INCREMENT
+		// if its type is numeric.
+		for i := 0; int64(i) < affected && i < len(c.Nodes); i++ {
+			c.Nodes[i].ID.Value = id + int64(i)
+		}
+	}
+	return nil
+}
+
+// rollback calls tx.Rollback and wraps the given error with the rollback error if one occurred.
+func rollback(tx dialect.Tx, err error) error {
+	if rerr := tx.Rollback(); rerr != nil {
+		err = fmt.Errorf("%w: %v", err, rerr)
+	}
+	return err
+}
+
+func edgeKeys(m map[string][]*EdgeSpec) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+func insertKeys(m map[string]*sql.InsertBuilder) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+func keys(m map[string]struct{}) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+func matchID(column string, pk []driver.Value) *sql.Predicate {
+	if len(pk) > 1 {
+		return sql.InValues(column, pk...)
+	}
+	return sql.EQ(column, pk[0])
+}
+
+func matchIDs(column1 string, pk1 []driver.Value, column2 string, pk2 []driver.Value) *sql.Predicate {
+	p := matchID(column1, pk1)
+	if len(pk2) > 1 {
+		// Use an "IN" predicate instead of a list of "OR"s
+		// when there is more than one node to connect.
+		return sql.And(p, sql.InValues(column2, pk2...))
+	}
+	return sql.And(p, sql.EQ(column2, pk2[0]))
+}
+
+// product returns the cartesian product of 2 id sets.
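+// For example, product([]driver.Value{1, 2}, []driver.Value{3})
+// yields [[1 3] [2 3]].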
+func product(a, b []driver.Value) [][2]driver.Value { + c := make([][2]driver.Value, 0, len(a)*len(b)) + for i := range a { + for j := range b { + c = append(c, [2]driver.Value{a[i], b[j]}) + } + } + return c +} diff --git a/vendor/entgo.io/ent/dialect/sql/sqljson/BUILD b/vendor/entgo.io/ent/dialect/sql/sqljson/BUILD new file mode 100644 index 00000000..8698b2bd --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqljson/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqljson", + srcs = [ + "dialect.go", + "sqljson.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/dialect/sql/sqljson", + importpath = "entgo.io/ent/dialect/sql/sqljson", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect", + "//vendor/entgo.io/ent/dialect/sql", + ], +) diff --git a/vendor/entgo.io/ent/dialect/sql/sqljson/dialect.go b/vendor/entgo.io/ent/dialect/sql/sqljson/dialect.go new file mode 100644 index 00000000..f65a895c --- /dev/null +++ b/vendor/entgo.io/ent/dialect/sql/sqljson/dialect.go @@ -0,0 +1,222 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package sqljson + +import ( + "fmt" + "reflect" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" +) + +type sqlite struct{} + +// Append implements the driver.Append method. +func (d *sqlite) Append(u *sql.UpdateBuilder, column string, elems []any, opts ...Option) { + setCase(u, column, when{ + Cond: func(b *sql.Builder) { + typ := func(b *sql.Builder) *sql.Builder { + return b.WriteString("JSON_TYPE").Wrap(func(b *sql.Builder) { + b.Ident(column).Comma() + identPath(column, opts...).mysqlPath(b) + }) + } + typ(b).WriteOp(sql.OpIsNull) + b.WriteString(" OR ") + typ(b).WriteOp(sql.OpEQ).WriteString("'null'") + }, + Then: func(b *sql.Builder) { + if len(opts) > 0 { + b.WriteString("JSON_SET").Wrap(func(b *sql.Builder) { + b.Ident(column).Comma() + identPath(column, opts...).mysqlPath(b) + b.Comma().Argf("JSON(?)", marshalArg(elems)) + }) + } else { + b.Arg(marshalArg(elems)) + } + }, + Else: func(b *sql.Builder) { + b.WriteString("JSON_INSERT").Wrap(func(b *sql.Builder) { + b.Ident(column).Comma() + // If no path was provided the top-level value is + // a JSON array. i.e. JSON_INSERT(c, '$[#]', ?). + path := func(b *sql.Builder) { b.WriteString("'$[#]'") } + if len(opts) > 0 { + p := identPath(column, opts...) + p.Path = append(p.Path, "[#]") + path = p.mysqlPath + } + for i, e := range elems { + if i > 0 { + b.Comma() + } + path(b) + b.Comma() + d.appendArg(b, e) + } + }) + }, + }) +} + +func (d *sqlite) appendArg(b *sql.Builder, v any) { + switch { + case !isPrimitive(v): + b.Argf("JSON(?)", marshalArg(v)) + default: + b.Arg(v) + } +} + +type mysql struct{} + +// Append implements the driver.Append method. 
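+// The MySQL variant builds on JSON_SET/JSON_ARRAY_APPEND and casts
+// non-primitive elements with CAST(? AS JSON) (see appendArg below).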
+func (d *mysql) Append(u *sql.UpdateBuilder, column string, elems []any, opts ...Option) {
+	setCase(u, column, when{
+		Cond: func(b *sql.Builder) {
+			typ := func(b *sql.Builder) *sql.Builder {
+				b.WriteString("JSON_TYPE(JSON_EXTRACT(")
+				b.Ident(column).Comma()
+				identPath(column, opts...).mysqlPath(b)
+				return b.WriteString("))")
+			}
+			typ(b).WriteOp(sql.OpIsNull)
+			b.WriteString(" OR ")
+			typ(b).WriteOp(sql.OpEQ).WriteString("'NULL'")
+		},
+		Then: func(b *sql.Builder) {
+			if len(opts) > 0 {
+				b.WriteString("JSON_SET").Wrap(func(b *sql.Builder) {
+					b.Ident(column).Comma()
+					identPath(column, opts...).mysqlPath(b)
+					b.Comma().WriteString("JSON_ARRAY(").Args(d.marshalArgs(elems)...).WriteByte(')')
+				})
+			} else {
+				b.WriteString("JSON_ARRAY(").Args(d.marshalArgs(elems)...).WriteByte(')')
+			}
+		},
+		Else: func(b *sql.Builder) {
+			b.WriteString("JSON_ARRAY_APPEND").Wrap(func(b *sql.Builder) {
+				b.Ident(column).Comma()
+				for i, e := range elems {
+					if i > 0 {
+						b.Comma()
+					}
+					identPath(column, opts...).mysqlPath(b)
+					b.Comma()
+					d.appendArg(b, e)
+				}
+			})
+		},
+	})
+}
+
+func (d *mysql) marshalArgs(args []any) []any {
+	vs := make([]any, len(args))
+	for i, v := range args {
+		if !isPrimitive(v) {
+			v = marshalArg(v)
+		}
+		vs[i] = v
+	}
+	return vs
+}
+
+func (d *mysql) appendArg(b *sql.Builder, v any) {
+	switch {
+	case !isPrimitive(v):
+		b.Argf("CAST(? AS JSON)", marshalArg(v))
+	default:
+		b.Arg(v)
+	}
+}
+
+type postgres struct{}
+
+// Append implements the driver.Append method.
+func (*postgres) Append(u *sql.UpdateBuilder, column string, elems []any, opts ...Option) {
+	setCase(u, column, when{
+		Cond: func(b *sql.Builder) {
+			valuePath(b, column, append(opts, Cast("jsonb"))...)
+			b.WriteOp(sql.OpIsNull)
+			b.WriteString(" OR ")
+			valuePath(b, column, append(opts, Cast("jsonb"))...)
+			b.WriteOp(sql.OpEQ).WriteString("'null'::jsonb")
+		},
+		Then: func(b *sql.Builder) {
+			if len(opts) > 0 {
+				b.WriteString("jsonb_set").Wrap(func(b *sql.Builder) {
+					b.Ident(column).Comma()
+					identPath(column, opts...).pgArrayPath(b)
+					b.Comma().Arg(marshalArg(elems))
+					b.Comma().WriteString("true")
+				})
+			} else {
+				b.Arg(marshalArg(elems))
+			}
+		},
+		Else: func(b *sql.Builder) {
+			if len(opts) > 0 {
+				b.WriteString("jsonb_set").Wrap(func(b *sql.Builder) {
+					b.Ident(column).Comma()
+					identPath(column, opts...).pgArrayPath(b)
+					b.Comma()
+					path := identPath(column, opts...)
+					path.value(b)
+					b.WriteString(" || ").Arg(marshalArg(elems))
+					b.Comma().WriteString("true")
+				})
+			} else {
+				b.Ident(column).WriteString(" || ").Arg(marshalArg(elems))
+			}
+		},
+	})
+}
+
+// driver groups all dialect-specific methods.
+type driver interface {
+	Append(u *sql.UpdateBuilder, column string, elems []any, opts ...Option)
+}
+
+func newDriver(name string) (driver, error) {
+	switch name {
+	case dialect.SQLite:
+		return (*sqlite)(nil), nil
+	case dialect.MySQL:
+		return (*mysql)(nil), nil
+	case dialect.Postgres:
+		return (*postgres)(nil), nil
+	default:
+		return nil, fmt.Errorf("sqljson: unknown driver %q", name)
+	}
+}
+
+type when struct{ Cond, Then, Else func(*sql.Builder) }
+
+// setCase sets the column value using a "CASE WHEN" statement: w.Cond
+// defines the condition/predicate, w.Then the true (if) case, and
+// w.Else the false (else) case.
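+//
+// The emitted expression has the shape:
+//
+//	CASE WHEN (<cond>) THEN <then> ELSE <else> END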
+func setCase(u *sql.UpdateBuilder, column string, w when) {
+	u.Set(column, sql.ExprFunc(func(b *sql.Builder) {
+		b.WriteString("CASE WHEN ").Wrap(func(b *sql.Builder) {
+			w.Cond(b)
+		})
+		b.WriteString(" THEN ")
+		w.Then(b)
+		b.WriteString(" ELSE ")
+		w.Else(b)
+		b.WriteString(" END")
+	}))
+}
+
+func isPrimitive(v any) bool {
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.Struct, reflect.Ptr, reflect.Interface:
+		return false
+	}
+	return true
+}
diff --git a/vendor/entgo.io/ent/dialect/sql/sqljson/sqljson.go b/vendor/entgo.io/ent/dialect/sql/sqljson/sqljson.go
new file mode 100644
index 00000000..52bf0cfb
--- /dev/null
+++ b/vendor/entgo.io/ent/dialect/sql/sqljson/sqljson.go
@@ -0,0 +1,720 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+package sqljson
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"unicode"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+)
+
+// HasKey returns a predicate for checking that a JSON key
+// exists and is not NULL.
+//
+//	sqljson.HasKey("column", sqljson.DotPath("a.b[2].c"))
+func HasKey(column string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		switch b.Dialect() {
+		case dialect.SQLite:
+			// JSON_TYPE returns NULL in case the path selects an element
+			// that does not exist. See: https://sqlite.org/json1.html#jtype.
+			path := identPath(column, opts...)
+			path.mysqlFunc("JSON_TYPE", b)
+			b.WriteOp(sql.OpNotNull)
+		default:
+			valuePath(b, column, opts...)
+			b.WriteOp(sql.OpNotNull)
+		}
+	})
+}
+
+// ValueIsNull returns a predicate for checking that a JSON value
+// (returned by the path) is a null literal (JSON "null").
+//
+// In order to check if the column is NULL (database NULL), or if
+// the JSON key exists, use sql.IsNull or sqljson.HasKey.
+//
+//	sqljson.ValueIsNull("a", sqljson.Path("b"))
+func ValueIsNull(column string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		switch b.Dialect() {
+		case dialect.MySQL:
+			path := identPath(column, opts...)
+			b.WriteString("JSON_CONTAINS").Wrap(func(b *sql.Builder) {
+				b.Ident(column).Comma()
+				b.WriteString("'null'").Comma()
+				path.mysqlPath(b)
+			})
+		case dialect.Postgres:
+			valuePath(b, column, append(opts, Cast("jsonb"))...)
+			b.WriteOp(sql.OpEQ).WriteString("'null'::jsonb")
+		case dialect.SQLite:
+			path := identPath(column, opts...)
+			path.mysqlFunc("JSON_TYPE", b)
+			b.WriteOp(sql.OpEQ).WriteString("'null'")
+		}
+	})
+}
+
+// ValueIsNotNull returns a predicate for checking that a JSON value
+// (returned by the path) is not the null literal (JSON "null").
+//
+//	sqljson.ValueIsNotNull("a", sqljson.Path("b"))
+func ValueIsNotNull(column string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		switch b.Dialect() {
+		case dialect.Postgres:
+			valuePath(b, column, append(opts, Cast("jsonb"))...)
+			b.WriteOp(sql.OpNEQ).WriteString("'null'::jsonb")
+		case dialect.SQLite:
+			path := identPath(column, opts...)
+			path.mysqlFunc("JSON_TYPE", b)
+			b.WriteOp(sql.OpNEQ).WriteString("'null'")
+		case dialect.MySQL:
+			path := identPath(column, opts...)
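+			// Unlike Postgres, MySQL has no jsonb comparison here;
+			// JSON_CONTAINS(<ident>, 'null', <path>) is negated instead.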
+ b.WriteString("NOT(JSON_CONTAINS").Wrap(func(b *sql.Builder) { + b.Ident(column).Comma() + b.WriteString("'null'").Comma() + path.mysqlPath(b) + }).WriteString(")") + } + }) +} + +// ValueEQ return a predicate for checking that a JSON value +// (returned by the path) is equal to the given argument. +// +// sqljson.ValueEQ("a", 1, sqljson.Path("b")) +func ValueEQ(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpEQ).Arg(arg) + }) +} + +// ValueNEQ return a predicate for checking that a JSON value +// (returned by the path) is not equal to the given argument. +// +// sqljson.ValueNEQ("a", 1, sqljson.Path("b")) +func ValueNEQ(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpNEQ).Arg(arg) + }) +} + +// ValueGT return a predicate for checking that a JSON value +// (returned by the path) is greater than the given argument. +// +// sqljson.ValueGT("a", 1, sqljson.Path("b")) +func ValueGT(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpGT).Arg(arg) + }) +} + +// ValueGTE return a predicate for checking that a JSON value +// (returned by the path) is greater than or equal to the given +// argument. +// +// sqljson.ValueGTE("a", 1, sqljson.Path("b")) +func ValueGTE(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpGTE).Arg(arg) + }) +} + +// ValueLT return a predicate for checking that a JSON value +// (returned by the path) is less than the given argument. +// +// sqljson.ValueLT("a", 1, sqljson.Path("b")) +func ValueLT(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpLT).Arg(arg) + }) +} + +// ValueLTE return a predicate for checking that a JSON value +// (returned by the path) is less than or equal to the given +// argument. +// +// sqljson.ValueLTE("a", 1, sqljson.Path("b")) +func ValueLTE(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + opts = normalizePG(b, arg, opts) + valuePath(b, column, opts...) + b.WriteOp(sql.OpLTE).Arg(arg) + }) +} + +// ValueContains return a predicate for checking that a JSON +// value (returned by the path) contains the given argument. +// +// sqljson.ValueContains("a", 1, sqljson.Path("b")) +func ValueContains(column string, arg any, opts ...Option) *sql.Predicate { + return sql.P(func(b *sql.Builder) { + path := identPath(column, opts...) 
+		switch b.Dialect() {
+		case dialect.MySQL:
+			b.WriteString("JSON_CONTAINS").Wrap(func(b *sql.Builder) {
+				b.Ident(column).Comma()
+				b.Arg(marshalArg(arg)).Comma()
+				path.mysqlPath(b)
+			})
+			b.WriteOp(sql.OpEQ).Arg(1)
+		case dialect.SQLite:
+			b.WriteString("EXISTS").Wrap(func(b *sql.Builder) {
+				b.WriteString("SELECT * FROM JSON_EACH").Wrap(func(b *sql.Builder) {
+					b.Ident(column).Comma()
+					path.mysqlPath(b)
+				})
+				b.WriteString(" WHERE ").Ident("value").WriteOp(sql.OpEQ).Arg(arg)
+			})
+		case dialect.Postgres:
+			opts = normalizePG(b, arg, opts)
+			path.Cast = "jsonb"
+			path.value(b)
+			b.WriteString(" @> ").Arg(marshalArg(arg))
+		}
+	})
+}
+
+// StringHasPrefix returns a predicate for checking that a JSON string value
+// (returned by the path) has the given substring as a prefix.
+func StringHasPrefix(column string, prefix string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		opts = append([]Option{Unquote(true)}, opts...)
+		valuePath(b, column, opts...)
+		b.Join(sql.HasPrefix("", prefix))
+	})
+}
+
+// StringHasSuffix returns a predicate for checking that a JSON string value
+// (returned by the path) has the given substring as a suffix.
+func StringHasSuffix(column string, suffix string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		opts = append([]Option{Unquote(true)}, opts...)
+		valuePath(b, column, opts...)
+		b.Join(sql.HasSuffix("", suffix))
+	})
+}
+
+// StringContains returns a predicate for checking that a JSON string value
+// (returned by the path) contains the given substring.
+func StringContains(column string, sub string, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		opts = append([]Option{Unquote(true)}, opts...)
+		valuePath(b, column, opts...)
+		b.Join(sql.Contains("", sub))
+	})
+}
+
+// ValueIn returns a predicate for checking that a JSON value
+// (returned by the path) is IN the given arguments.
+//
+//	sqljson.ValueIn("a", []any{1, 2, 3}, sqljson.Path("b"))
+func ValueIn(column string, args []any, opts ...Option) *sql.Predicate {
+	return valueInOp(column, args, opts, sql.OpIn)
+}
+
+// ValueNotIn returns a predicate for checking that a JSON value
+// (returned by the path) is NOT IN the given arguments.
+//
+//	sqljson.ValueNotIn("a", []any{1, 2, 3}, sqljson.Path("b"))
+func ValueNotIn(column string, args []any, opts ...Option) *sql.Predicate {
+	if len(args) == 0 {
+		return sql.NotIn(column)
+	}
+	return valueInOp(column, args, opts, sql.OpNotIn)
+}
+
+func valueInOp(column string, args []any, opts []Option, op sql.Op) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		if allString(args) {
+			opts = append(opts, Unquote(true))
+		}
+		if len(args) > 0 {
+			opts = normalizePG(b, args[0], opts)
+		}
+		valuePath(b, column, opts...)
+		b.WriteOp(op)
+		b.Wrap(func(b *sql.Builder) {
+			if s, ok := args[0].(*sql.Selector); ok {
+				b.Join(s)
+			} else {
+				b.Args(args...)
+			}
+		})
+	})
+}
+
+// LenEQ returns a predicate for checking that an array length
+// of a JSON (returned by the path) is equal to the given argument.
+//
+//	sqljson.LenEQ("a", 1, sqljson.Path("b"))
+func LenEQ(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpEQ).Arg(size)
+	})
+}
+
+// LenNEQ returns a predicate for checking that an array length
+// of a JSON (returned by the path) is not equal to the given argument.
+//
+//	sqljson.LenNEQ("a", 1, sqljson.Path("b"))
+func LenNEQ(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpNEQ).Arg(size)
+	})
+}
+
+// LenGT returns a predicate for checking that an array length
+// of a JSON (returned by the path) is greater than the given
+// argument.
+//
+//	sqljson.LenGT("a", 1, sqljson.Path("b"))
+func LenGT(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpGT).Arg(size)
+	})
+}
+
+// LenGTE returns a predicate for checking that an array length
+// of a JSON (returned by the path) is greater than or equal to
+// the given argument.
+//
+//	sqljson.LenGTE("a", 1, sqljson.Path("b"))
+func LenGTE(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpGTE).Arg(size)
+	})
+}
+
+// LenLT returns a predicate for checking that an array length
+// of a JSON (returned by the path) is less than the given
+// argument.
+//
+//	sqljson.LenLT("a", 1, sqljson.Path("b"))
+func LenLT(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpLT).Arg(size)
+	})
+}
+
+// LenLTE returns a predicate for checking that an array length
+// of a JSON (returned by the path) is less than or equal to
+// the given argument.
+//
+//	sqljson.LenLTE("a", 1, sqljson.Path("b"))
+func LenLTE(column string, size int, opts ...Option) *sql.Predicate {
+	return sql.P(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+		b.WriteOp(sql.OpLTE).Arg(size)
+	})
+}
+
+// LenPath returns an SQL expression for getting the length
+// of a JSON value (returned by the path).
+func LenPath(column string, opts ...Option) sql.Querier {
+	return sql.ExprFunc(func(b *sql.Builder) {
+		lenPath(b, column, opts...)
+	})
+}
+
+// OrderLen returns a custom predicate function (as defined in the doc)
+// that sets the result order by the length of the given JSON value.
+func OrderLen(column string, opts ...Option) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		s.OrderExpr(LenPath(column, opts...))
+	}
+}
+
+// OrderLenDesc returns a custom predicate function (as defined in the doc) that
+// sets the result order by the length of the given JSON value, but in descending order.
+func OrderLenDesc(column string, opts ...Option) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		s.OrderExpr(
+			sql.DescExpr(LenPath(column, opts...)),
+		)
+	}
+}
+
+// lenPath writes to the given SQL builder the JSON expression
+// for getting the length of a given JSON path.
+//
+//	lenPath(b, "column", Path("a", "b", "[1]", "c"))
+func lenPath(b *sql.Builder, column string, opts ...Option) {
+	path := identPath(column, opts...)
+	path.length(b)
+}
+
+// Append writes to the given SQL builder the SQL command for appending JSON values
+// into the array, optionally defined as a key. Note that the generated SQL follows the
+// Go append semantics: the JSON column/key will be set to the given array in case it is
+// `null` or NULL. For example:
+//
+//	Append(u, column, []string{"a", "b"})
+//	UPDATE "t" SET "c" = CASE
+//	WHEN ("c" IS NULL OR "c" = 'null'::jsonb)
+//	THEN $1 ELSE "c" || $2 END
+//
+//	Append(u, column, []any{"a", 1}, sqljson.Path("a"))
+//	UPDATE "t" SET "c" = CASE
+//	WHEN (("c"->'a')::jsonb IS NULL OR ("c"->'a')::jsonb = 'null'::jsonb)
+//	THEN jsonb_set("c", '{a}', $1, true) ELSE jsonb_set("c", '{a}', "c"->'a' || $2, true) END
+func Append[T any](u *sql.UpdateBuilder, column string, elems []T, opts ...Option) {
+	if len(elems) == 0 {
+		u.AddError(fmt.Errorf("sqljson: cannot append an empty array to column %q", column))
+		return
+	}
+	drv, err := newDriver(u.Dialect())
+	if err != nil {
+		u.AddError(err)
+		return
+	}
+	vs := make([]any, len(elems))
+	for i, e := range elems {
+		vs[i] = e
+	}
+	drv.Append(u, column, vs, opts...)
+}
+
+// Option allows for calling database JSON paths with functional options.
+type Option func(*PathOptions)
+
+// Path sets the path to the JSON value of a column.
+//
+//	ValuePath(b, "column", Path("a", "b", "[1]", "c"))
+func Path(path ...string) Option {
+	return func(p *PathOptions) {
+		p.Path = path
+	}
+}
+
+// DotPath is similar to Path, but accepts string with dot format.
+//
+//	ValuePath(b, "column", DotPath("a.b.c"))
+//	ValuePath(b, "column", DotPath("a.b[2].c"))
+//
+// Note that DotPath is ignored if the input is invalid.
+func DotPath(dotpath string) Option {
+	path, _ := ParsePath(dotpath)
+	return func(p *PathOptions) {
+		p.Path = path
+	}
+}
+
+// Unquote indicates that the result value should be unquoted.
+//
+//	ValuePath(b, "column", Path("a", "b", "[1]", "c"), Unquote(true))
+func Unquote(unquote bool) Option {
+	return func(p *PathOptions) {
+		p.Unquote = unquote
+	}
+}
+
+// Cast indicates that the result value should be cast to the given type.
+//
+//	ValuePath(b, "column", Path("a", "b", "[1]", "c"), Cast("int"))
+func Cast(typ string) Option {
+	return func(p *PathOptions) {
+		p.Cast = typ
+	}
+}
+
+// PathOptions holds the options for accessing a JSON value from an identifier.
+type PathOptions struct {
+	Ident   string
+	Path    []string
+	Cast    string
+	Unquote bool
+}
+
+// identPath creates a PathOptions for the given identifier.
+func identPath(ident string, opts ...Option) *PathOptions {
+	path := &PathOptions{Ident: ident}
+	for i := range opts {
+		opts[i](path)
+	}
+	return path
+}
+
+func (p *PathOptions) Query() (string, []any) {
+	return p.Ident, nil
+}
+
+// ValuePath returns an SQL expression for getting the JSON
+// value of a column with an optional path and cast options.
+//
+//	sqljson.ValueEQ(
+//		column,
+//		sqljson.ValuePath(column, Path("a"), Cast("int")),
+//		sqljson.Path("a"),
+//	)
+func ValuePath(column string, opts ...Option) sql.Querier {
+	return sql.ExprFunc(func(b *sql.Builder) {
+		valuePath(b, column, opts...)
+	})
+}
+
+// OrderValue returns a custom predicate function (as defined in the doc)
+// that sets the result order by the given JSON value.
+func OrderValue(column string, opts ...Option) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		s.OrderExpr(ValuePath(column, opts...))
+	}
+}
+
+// OrderValueDesc returns a custom predicate function (as defined in the doc)
+// that sets the result order by the given JSON value, but in descending order.
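+//
+// For example (with a hypothetical generated builder whose Order accepts
+// func(*sql.Selector) options):
+//
+//	q.Order(sqljson.OrderValueDesc("data", sqljson.Path("age")))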
+func OrderValueDesc(column string, opts ...Option) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		s.OrderExpr(
+			sql.DescExpr(ValuePath(column, opts...)),
+		)
+	}
+}
+
+// valuePath writes to the given SQL builder the JSON path for
+// getting the value of a given JSON path.
+// Use sqljson.ValuePath for using a JSON value as an argument.
+func valuePath(b *sql.Builder, column string, opts ...Option) {
+	path := identPath(column, opts...)
+	path.value(b)
+}
+
+// value writes the path for getting the JSON value.
+func (p *PathOptions) value(b *sql.Builder) {
+	switch {
+	case len(p.Path) == 0:
+		b.Ident(p.Ident)
+	case b.Dialect() == dialect.Postgres:
+		if p.Cast != "" {
+			b.WriteByte('(')
+			defer b.WriteString(")::" + p.Cast)
+		}
+		p.pgTextPath(b)
+	default:
+		if p.Unquote && b.Dialect() == dialect.MySQL {
+			b.WriteString("JSON_UNQUOTE(")
+			defer b.WriteByte(')')
+		}
+		p.mysqlFunc("JSON_EXTRACT", b)
+	}
+}
+
+// length writes the path for getting the length of a JSON value.
+func (p *PathOptions) length(b *sql.Builder) {
+	switch {
+	case b.Dialect() == dialect.Postgres:
+		b.WriteString("JSONB_ARRAY_LENGTH(")
+		p.pgTextPath(b)
+		b.WriteByte(')')
+	case b.Dialect() == dialect.MySQL:
+		p.mysqlFunc("JSON_LENGTH", b)
+	default:
+		p.mysqlFunc("JSON_ARRAY_LENGTH", b)
+	}
+}
+
+// mysqlFunc writes the JSON path in MySQL format for the
+// given function. `JSON_EXTRACT("a", '$.b.c')`.
+func (p *PathOptions) mysqlFunc(fn string, b *sql.Builder) {
+	b.WriteString(fn).WriteByte('(')
+	b.Ident(p.Ident).Comma()
+	p.mysqlPath(b)
+	b.WriteByte(')')
+}
+
+// mysqlPath writes the JSON path in MySQL (or SQLite) format.
+func (p *PathOptions) mysqlPath(b *sql.Builder) {
+	b.WriteString(`'$`)
+	for _, p := range p.Path {
+		switch _, isIndex := isJSONIdx(p); {
+		case isIndex:
+			b.WriteString(p)
+		case p == "*" || isQuoted(p) || isIdentifier(p):
+			b.WriteString("." + p)
+		default:
+			b.WriteString(`."` + p + `"`)
+		}
+	}
+	b.WriteByte('\'')
+}
+
+// pgTextPath writes the JSON path in PostgreSQL text format: `"a"->'b'->>'c'`.
+func (p *PathOptions) pgTextPath(b *sql.Builder) {
+	b.Ident(p.Ident)
+	for i, s := range p.Path {
+		b.WriteString("->")
+		if p.Unquote && i == len(p.Path)-1 {
+			b.WriteString(">")
+		}
+		if idx, ok := isJSONIdx(s); ok {
+			b.WriteString(idx)
+		} else {
+			b.WriteString("'" + s + "'")
+		}
+	}
+}
+
+// pgArrayPath writes the JSON path in PostgreSQL array text[] format: '{a,1,b}'.
+func (p *PathOptions) pgArrayPath(b *sql.Builder) {
+	b.WriteString("'{")
+	for i, s := range p.Path {
+		if i > 0 {
+			b.Comma()
+		}
+		if idx, ok := isJSONIdx(s); ok {
+			s = idx
+		}
+		b.WriteString(s)
+	}
+	b.WriteString("}'")
+}
+
+// ParsePath parses the "dotpath" for the DotPath option.
+//
+//	"a.b"       => ["a", "b"]
+//	"a[1][2]"   => ["a", "[1]", "[2]"]
+//	"a.\"b.c\"" => ["a", "\"b.c\""]
+func ParsePath(dotpath string) ([]string, error) {
+	var (
+		i, p int
+		path []string
+	)
+	for i < len(dotpath) {
+		switch r := dotpath[i]; {
+		case r == '"':
+			if i == len(dotpath)-1 {
+				return nil, fmt.Errorf("unexpected quote")
+			}
+			idx := strings.IndexRune(dotpath[i+1:], '"')
+			if idx == -1 || idx == 0 {
+				return nil, fmt.Errorf("unbalanced quote")
+			}
+			i += idx + 2
+		case r == '[':
+			if p != i {
+				path = append(path, dotpath[p:i])
+			}
+			p = i
+			if i == len(dotpath)-1 {
+				return nil, fmt.Errorf("unexpected bracket")
+			}
+			idx := strings.IndexRune(dotpath[i:], ']')
+			if idx == -1 || idx == 1 {
+				return nil, fmt.Errorf("unbalanced bracket")
+			}
+			if !isNumber(dotpath[i+1 : i+idx]) {
+				return nil, fmt.Errorf("invalid index %q", dotpath[i:i+idx+1])
+			}
+			i += idx + 1
+		case r == '.' || r == ']':
+			if p != i {
+				path = append(path, dotpath[p:i])
+			}
+			i++
+			p = i
+		default:
+			i++
+		}
+	}
+	if p != i {
+		path = append(path, dotpath[p:i])
+	}
+	return path, nil
+}
+
+// normalizePG adds a cast option to the JSON path if the argument type is
+// not string, in order to avoid "missing type casts" errors in Postgres.
+func normalizePG(b *sql.Builder, arg any, opts []Option) []Option {
+	if b.Dialect() != dialect.Postgres {
+		return opts
+	}
+	base := []Option{Unquote(true)}
+	switch arg.(type) {
+	case string:
+	case bool:
+		base = append(base, Cast("bool"))
+	case float32, float64:
+		base = append(base, Cast("float"))
+	case int8, int16, int32, int64, int, uint8, uint16, uint32, uint64:
+		base = append(base, Cast("int"))
+	}
+	return append(base, opts...)
+}
+
+func isIdentifier(name string) bool {
+	if name == "" {
+		return false
+	}
+	for i, c := range name {
+		if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
+			return false
+		}
+	}
+	return true
+}
+
+func isQuoted(s string) bool {
+	if s == "" {
+		return false
+	}
+	return s[0] == '"' && s[len(s)-1] == '"'
+}
+
+// isJSONIdx reports whether the string represents a JSON index.
+func isJSONIdx(s string) (string, bool) {
+	if len(s) > 2 && s[0] == '[' && s[len(s)-1] == ']' && (isNumber(s[1:len(s)-1]) || s[1] == '#' && isNumber(s[2:len(s)-1])) {
+		return s[1 : len(s)-1], true
+	}
+	return "", false
+}
+
+// isNumber reports whether the string is a number (category N).
+func isNumber(s string) bool {
+	for _, r := range s {
+		if !unicode.IsNumber(r) {
+			return false
+		}
+	}
+	return true
+}
+
+// allString reports whether the slice contains only strings.
+func allString(v []any) bool {
+	for i := range v {
+		if _, ok := v[i].(string); !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// marshalArg stringifies the given argument to a valid JSON document.
+func marshalArg(arg any) any {
+	if buf, err := json.Marshal(arg); err == nil {
+		arg = string(buf)
+	}
+	return arg
+}
diff --git a/vendor/entgo.io/ent/ent.go b/vendor/entgo.io/ent/ent.go
new file mode 100644
index 00000000..2c276b5c
--- /dev/null
+++ b/vendor/entgo.io/ent/ent.go
@@ -0,0 +1,537 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+// Package ent is the interface between end-user schemas and entc (ent codegen).
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+type (
+	// The Interface type describes the requirements for an exported type defined in the schema package.
+	// It functions as the interface between the user's schema types and the codegen loader.
+	// Users should use the Schema type for embedding as follows:
+	//
+	//	type T struct {
+	//		ent.Schema
+	//	}
+	//
+	Interface interface {
+		// Type is a dummy method, that is used in edge declaration.
+		//
+		// The Type method should be used as follows:
+		//
+		//	type S struct { ent.Schema }
+		//
+		//	type T struct { ent.Schema }
+		//
+		//	func (T) Edges() []ent.Edge {
+		//		return []ent.Edge{
+		//			edge.To("S", S.Type),
+		//		}
+		//	}
+		//
+		Type()
+		// Fields returns the fields of the schema.
+		Fields() []Field
+		// Edges returns the edges of the schema.
+		Edges() []Edge
+		// Indexes returns the indexes of the schema.
+		Indexes() []Index
+		// Config returns an optional config for the schema.
+		//
+		// Deprecated: the Config method predates the Annotations method, and it
+		// is planned to be removed in v0.5.0. New code should use Annotations instead.
+		//
+		//	func (T) Annotations() []schema.Annotation {
+		//		return []schema.Annotation{
+		//			entsql.Annotation{Table: "Name"},
+		//		}
+		//	}
+		//
+		Config() Config
+		// Mixin returns an optional list of Mixin to extend
+		// the schema.
+		Mixin() []Mixin
+		// Hooks returns an optional list of Hook to apply on
+		// the executed mutations.
+		Hooks() []Hook
+		// Interceptors returns an optional list of Interceptor
+		// to apply on the executed queries.
+		Interceptors() []Interceptor
+		// Policy returns the privacy policy of the schema.
+		Policy() Policy
+		// Annotations returns a list of schema annotations to be used by
+		// codegen extensions.
+		Annotations() []schema.Annotation
+	}
+
+	// A Field interface returns a field descriptor for vertex fields/properties.
+	// The usage for the interface is as follows:
+	//
+	//	func (T) Fields() []ent.Field {
+	//		return []ent.Field{
+	//			field.Int("int"),
+	//		}
+	//	}
+	//
+	Field interface {
+		Descriptor() *field.Descriptor
+	}
+
+	// An Edge interface returns an edge descriptor for vertex edges.
+	// The usage for the interface is as follows:
+	//
+	//	func (T) Edges() []ent.Edge {
+	//		return []ent.Edge{
+	//			edge.To("S", S.Type),
+	//		}
+	//	}
+	//
+	Edge interface {
+		Descriptor() *edge.Descriptor
+	}
+
+	// An Index interface returns an index descriptor for vertex indexes.
+	// The usage for the interface is as follows:
+	//
+	//	func (T) Indexes() []ent.Index {
+	//		return []ent.Index{
+	//			index.Fields("f1", "f2").
+	//				Unique(),
+	//		}
+	//	}
+	//
+	Index interface {
+		Descriptor() *index.Descriptor
+	}
+
+	// A Config structure is used to configure an entity schema.
+	// The usage of this structure is as follows:
+	//
+	//	func (T) Config() ent.Config {
+	//		return ent.Config{
+	//			Table: "Name",
+	//		}
+	//	}
+	//
+	// Deprecated: the Config object predates the schema.Annotation method and it
+	// is planned to be removed in v0.5.0. New code should use Annotations instead.
+	//
+	//	func (T) Annotations() []schema.Annotation {
+	//		return []schema.Annotation{
+	//			entsql.Annotation{Table: "Name"},
+	//		}
+	//	}
+	//
+	Config struct {
+		// A Table is an optional table name defined for the schema.
+		Table string
+	}
+
+	// The Mixin type describes a set of methods that can extend
+	// other methods in the schema without calling them directly.
+	//
+	//	type TimeMixin struct {}
+	//
+	//	func (TimeMixin) Fields() []ent.Field {
+	//		return []ent.Field{
+	//			field.Time("created_at").
+	//				Immutable().
+	//				Default(time.Now),
+	//			field.Time("updated_at").
+	//				Default(time.Now).
+	//				UpdateDefault(time.Now),
+	//		}
+	//	}
+	//
+	//	type T struct {
+	//		ent.Schema
+	//	}
+	//
+	//	func (T) Mixin() []ent.Mixin {
+	//		return []ent.Mixin{
+	//			TimeMixin{},
+	//		}
+	//	}
+	//
+	Mixin interface {
+		// Fields returns a slice of fields to add to the schema.
+		Fields() []Field
+		// Edges returns a slice of edges to add to the schema.
+		Edges() []Edge
+		// Indexes returns a slice of indexes to add to the schema.
+		Indexes() []Index
+		// Hooks returns a slice of hooks to add to the schema.
+		// Note that mixin hooks are executed before schema hooks.
+		Hooks() []Hook
+		// Interceptors returns a slice of interceptors to add to the schema.
+		// Note that mixin interceptors are executed before schema interceptors.
+		Interceptors() []Interceptor
+		// Policy returns a privacy policy to add to the schema.
+		// Note that the mixin policy is executed before the schema policy.
+		Policy() Policy
+		// Annotations returns a list of schema annotations to add
+		// to the schema annotations.
+		Annotations() []schema.Annotation
+	}
+
+	// The Policy type defines the privacy policy of an entity.
+	// The usage for the interface is as follows:
+	//
+	//	type T struct {
+	//		ent.Schema
+	//	}
+	//
+	//	func (T) Policy() ent.Policy {
+	//		return privacy.AlwaysAllowRule()
+	//	}
+	//
+	Policy interface {
+		EvalMutation(context.Context, Mutation) error
+		EvalQuery(context.Context, Query) error
+	}
+
+	// Schema is the default implementation for the schema Interface.
+	// It can be embedded in end-user schemas as follows:
+	//
+	//	type T struct {
+	//		ent.Schema
+	//	}
+	//
+	Schema struct {
+		Interface
+	}
+)
+
+// Fields of the schema.
+func (Schema) Fields() []Field { return nil }
+
+// Edges of the schema.
+func (Schema) Edges() []Edge { return nil }
+
+// Indexes of the schema.
+func (Schema) Indexes() []Index { return nil }
+
+// Config of the schema.
+func (Schema) Config() Config { return Config{} }
+
+// Mixin of the schema.
+func (Schema) Mixin() []Mixin { return nil }
+
+// Hooks of the schema.
+func (Schema) Hooks() []Hook { return nil }
+
+// Interceptors of the schema.
+func (Schema) Interceptors() []Interceptor { return nil }
+
+// Policy of the schema.
+func (Schema) Policy() Policy { return nil }
+
+// Annotations of the schema.
+func (Schema) Annotations() []schema.Annotation { return nil }
+
+type (
+	// Value represents a value returned by ent.
+	Value any
+
+	// Mutation represents an operation that mutates the graph.
+	// For example, adding a new node, updating many, or dropping
+	// data. The implementation is generated by entc (ent codegen).
+	Mutation interface {
+		// Op returns the operation name generated by entc.
+		Op() Op
+		// Type returns the schema type for this mutation.
+		Type() string
+
+		// Fields returns all fields that were changed during
+		// this mutation. Note that, in order to get all numeric
+		// fields that were in/decremented, call AddedFields().
+		Fields() []string
+		// Field returns the value of a field with the given name.
+		// The second boolean value indicates that this field was
+		// not set, or was not defined in the schema.
+		Field(name string) (Value, bool)
+		// SetField sets the value for the given name. It returns an
+		// error if the field is not defined in the schema, or if the
+		// type mismatches the field type.
+		SetField(name string, value Value) error
+
+		// AddedFields returns all numeric fields that were incremented
+		// or decremented during this mutation.
+		AddedFields() []string
+		// AddedField returns the numeric value that was in/decremented
+		// from a field with the given name. The second value indicates
+		// that this field was not set, or was not defined in the schema.
+		AddedField(name string) (Value, bool)
+		// AddField adds the value for the given name. It returns an
+		// error if the field is not defined in the schema, or if the
+		// type mismatches the field type.
+		AddField(name string, value Value) error
+
+		// ClearedFields returns all nullable fields that were cleared
+		// during this mutation.
+		ClearedFields() []string
+		// FieldCleared returns a boolean indicating whether this field was
+		// cleared in this mutation.
+		FieldCleared(name string) bool
+		// ClearField clears the value for the given name. It returns an
+		// error if the field is not defined in the schema.
+		ClearField(name string) error
+
+		// ResetField resets all changes in the mutation regarding the
+		// given field name. It returns an error if the field is not
+		// defined in the schema.
+		ResetField(name string) error
+
+		// AddedEdges returns all edge names that were set/added in this
+		// mutation.
+		AddedEdges() []string
+		// AddedIDs returns all ids (to other nodes) that were added for
+		// the given edge name.
+		AddedIDs(name string) []Value
+
+		// RemovedEdges returns all edge names that were removed in this
+		// mutation.
+		RemovedEdges() []string
+		// RemovedIDs returns all ids (to other nodes) that were removed for
+		// the given edge name.
+		RemovedIDs(name string) []Value
+
+		// ClearedEdges returns all edge names that were cleared in this
+		// mutation.
+		ClearedEdges() []string
+		// EdgeCleared returns a boolean indicating whether this edge was
+		// cleared in this mutation.
+		EdgeCleared(name string) bool
+		// ClearEdge clears the value for the given name. It returns an
+		// error if the edge name is not defined in the schema.
+		ClearEdge(name string) error
+
+		// ResetEdge resets all changes in the mutation regarding the
+		// given edge name. It returns an error if the edge is not
+		// defined in the schema.
+		ResetEdge(name string) error
+
+		// OldField returns the old value of the field from the database.
+		// An error is returned if the mutation operation is not UpdateOne,
+		// or the query to the database failed.
+		OldField(ctx context.Context, name string) (Value, error)
+	}
+
+	// Mutator is the interface that wraps the Mutate method.
+	Mutator interface {
+		// Mutate applies the given mutation on the graph.
+		Mutate(context.Context, Mutation) (Value, error)
+	}
+
+	// The MutateFunc type is an adapter to allow the use of ordinary
+	// function as Mutator. If f is a function with the appropriate signature,
+	// MutateFunc(f) is a Mutator that calls f.
+	MutateFunc func(context.Context, Mutation) (Value, error)
+
+	// Hook defines the "mutation middleware". A function that gets a Mutator
+	// and returns a Mutator. For example:
+	//
+	//	hook := func(next ent.Mutator) ent.Mutator {
+	//		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+	//			fmt.Printf("Type: %s, Operation: %s, ConcreteType: %T\n", m.Type(), m.Op(), m)
+	//			return next.Mutate(ctx, m)
+	//		})
+	//	}
+	//
+	Hook func(Mutator) Mutator
+)
+
+// Mutate calls f(ctx, m).
+func (f MutateFunc) Mutate(ctx context.Context, m Mutation) (Value, error) {
+	return f(ctx, m)
+}
+
+type (
+	// Query represents a query builder of an entity. It is
+	// usually one of the generated query-builder types.
+	Query any
+
+	// Querier is the interface that wraps the Query method.
+	// Calling Querier.Query(ent.Query) triggers the execution
+	// of the query.
+	Querier interface {
+		// Query runs the given query on the graph and returns its result.
+		Query(context.Context, Query) (Value, error)
+	}
+
+	// The QuerierFunc type is an adapter to allow the use of an ordinary
+	// function as a Querier. If f is a function with the appropriate signature,
+	// QuerierFunc(f) is a Querier that calls f.
+	QuerierFunc func(context.Context, Query) (Value, error)
+
+	// Interceptor defines an execution middleware for various types of Ent queries.
+	// Contrary to Hooks, Interceptors are implemented as interfaces, allowing them to
+	// intercept and modify the query at different stages, providing more fine-grained
+	// control over its behavior. For example, see the Traverser interface.
+	Interceptor interface {
+		// Intercept is a function that gets a Querier and returns a Querier. For example:
+		//
+		//	ent.InterceptFunc(func(next ent.Querier) ent.Querier {
+		//		return ent.QuerierFunc(func(ctx context.Context, query ent.Query) (ent.Value, error) {
+		//			// Do something before the query execution.
+		//			value, err := next.Query(ctx, query)
+		//			// Do something after the query execution.
+		//			return value, err
+		//		})
+		//	})
+		//
+		// Note that unlike Traverse functions, which are called at each traversal stage, Intercept functions
+		// are invoked before the query execution. This means that using Traverse functions is a better fit
+		// for adding default filters, while using Intercept functions is a better fit for implementing logging
+		// or caching.
+		//
+		//	client.User.Query().
+		//		QueryGroups().	// User traverse functions applied.
+		//		QueryPosts().	// Group traverse functions applied.
+		//		All(ctx)	// Post traverse and intercept functions applied.
+		//
+		Intercept(Querier) Querier
+	}
+
+	// The InterceptFunc type is an adapter to allow the use of an ordinary function as an Interceptor.
+	// If f is a function with the appropriate signature, InterceptFunc(f) is an Interceptor that calls f.
+	InterceptFunc func(Querier) Querier
+
+	// Traverser defines a graph-traversal middleware for various types of Ent queries.
+	// Contrary to Interceptors, Traverse functions are executed on graph traversals before the
+	// query is executed. For example:
+	//
+	//	ent.TraverseFunc(func(ctx context.Context, q ent.Query) error {
+	//		// Filter out deleted pets.
+	//		if pq, ok := q.(*gen.PetQuery); ok {
+	//			pq.Where(pet.DeletedAtIsNil())
+	//		}
+	//		return nil
+	//	})
+	//
+	//	client.Pet.Query().
+	//		QueryOwner().	// Pet traverse functions are applied and filter deleted pets.
+	//		All(ctx)	// User traverse and interceptor functions are applied.
+	//
+	Traverser interface {
+		Traverse(context.Context, Query) error
+	}
+
+	// The TraverseFunc type is an adapter to allow the use of an ordinary function as a Traverser.
+	// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f.
+	TraverseFunc func(context.Context, Query) error
+)
+
+// Query calls f(ctx, q).
+func (f QuerierFunc) Query(ctx context.Context, q Query) (Value, error) {
+	return f(ctx, q)
+}
+
+// Intercept calls f(next).
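+// As a sketch, an interceptor can consult the QueryContext attached to ctx
+// (see QueryFromContext below); the caching decision here is hypothetical:
+//
+//	withCache := ent.InterceptFunc(func(next ent.Querier) ent.Querier {
+//		return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
+//			if qc := ent.QueryFromContext(ctx); qc != nil && qc.Op == "Count" {
+//				// A cache lookup could short-circuit the query here.
+//			}
+//			return next.Query(ctx, q)
+//		})
+//	})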
+func (f InterceptFunc) Intercept(next Querier) Querier {
+	return f(next)
+}
+
+// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
+func (f TraverseFunc) Intercept(next Querier) Querier {
+	return next
+}
+
+// Traverse calls f(ctx, q).
+func (f TraverseFunc) Traverse(ctx context.Context, q Query) error {
+	return f(ctx, q)
+}
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type Op
+
+// An Op represents a mutation operation.
+type Op uint
+
+// Mutation operations.
+const (
+	OpCreate    Op = 1 << iota // node creation.
+	OpUpdate                   // update nodes by predicate (if any).
+	OpUpdateOne                // update one node.
+	OpDelete                   // delete nodes by predicate (if any).
+	OpDeleteOne                // delete one node.
+)
+
+// Is reports whether o matches the given operation.
+func (i Op) Is(o Op) bool { return i&o != 0 }
+
+type (
+	// QueryContext contains additional information about
+	// the context in which the query is executed.
+	QueryContext struct {
+		// Op defines the operation name. e.g., First, All, Count, etc.
+		Op string
+		// Type defines the query type as defined in the generated code.
+		Type string
+		// Unique indicates if the Unique modifier was set on the query and
+		// its value. Calling Unique(false) sets the value of Unique to false.
+		Unique *bool
+		// Limit indicates if the Limit modifier was set on the query and
+		// its value. Calling Limit(10) sets the value of Limit to 10.
+		Limit *int
+		// Offset indicates if the Offset modifier was set on the query and
+		// its value. Calling Offset(10) sets the value of Offset to 10.
+		Offset *int
+		// Fields specifies the fields that were selected in the query.
+		Fields []string
+	}
+	queryCtxKey struct{}
+)
+
+// NewQueryContext returns a new context with the given QueryContext attached.
+func NewQueryContext(parent context.Context, c *QueryContext) context.Context {
+	return context.WithValue(parent, queryCtxKey{}, c)
+}
+
+// QueryFromContext returns the QueryContext value stored in ctx, if any.
+func QueryFromContext(ctx context.Context) *QueryContext {
+	c, _ := ctx.Value(queryCtxKey{}).(*QueryContext)
+	return c
+}
+
+// Clone returns a deep copy of the query context.
+func (q *QueryContext) Clone() *QueryContext {
+	c := &QueryContext{
+		Op:     q.Op,
+		Type:   q.Type,
+		Fields: append([]string(nil), q.Fields...),
+	}
+	if q.Unique != nil {
+		v := *q.Unique
+		c.Unique = &v
+	}
+	if q.Limit != nil {
+		v := *q.Limit
+		c.Limit = &v
+	}
+	if q.Offset != nil {
+		v := *q.Offset
+		c.Offset = &v
+	}
+	return c
+}
+
+// AppendFieldOnce appends the given field to the context's Fields list if it is not already present.
+func (q *QueryContext) AppendFieldOnce(f string) *QueryContext {
+	for _, f1 := range q.Fields {
+		if f == f1 {
+			return q
+		}
+	}
+	q.Fields = append(q.Fields, f)
+	return q
+}
diff --git a/vendor/entgo.io/ent/entql/BUILD b/vendor/entgo.io/ent/entql/BUILD
new file mode 100644
index 00000000..bf78de78
--- /dev/null
+++ b/vendor/entgo.io/ent/entql/BUILD
@@ -0,0 +1,12 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "entql",
+    srcs = [
+        "entql.go",
+        "types.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/entgo.io/ent/entql",
+    importpath = "entgo.io/ent/entql",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/entgo.io/ent/entql/entql.go b/vendor/entgo.io/ent/entql/entql.go
new file mode 100644
index 00000000..8f9f1f9f
--- /dev/null
+++ b/vendor/entgo.io/ent/entql/entql.go
@@ -0,0 +1,464 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+
+// Package entql provides an experimental API for interacting dynamically
+// with ent queries. For more info, search for it in https://entgo.io.
+package entql
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// An Op represents a predicate operator.
+type Op int
+
+// Builtin operators.
+const (
+	OpAnd   Op = iota // logical and.
+	OpOr              // logical or.
+	OpNot             // logical negation.
+	OpEQ              // =
+	OpNEQ             // <>
+	OpGT              // >
+	OpGTE             // >=
+	OpLT              // <
+	OpLTE             // <=
+	OpIn              // IN
+	OpNotIn           // NOT IN
+)
+
+var ops = [...]string{
+	OpAnd:   "&&",
+	OpOr:    "||",
+	OpNot:   "!",
+	OpEQ:    "==",
+	OpNEQ:   "!=",
+	OpGT:    ">",
+	OpGTE:   ">=",
+	OpLT:    "<",
+	OpLTE:   "<=",
+	OpIn:    "in",
+	OpNotIn: "not in",
+}
+
+// String returns the text representation of an operator.
+func (o Op) String() string {
+	if o >= 0 && int(o) < len(ops) {
+		return ops[o]
+	}
+	return ""
+}
+
+// A Func represents a function expression.
+type Func string
+
+// Builtin functions.
+const (
+	FuncEqualFold    Func = "equal_fold"    // equals case-insensitive
+	FuncContains     Func = "contains"      // containing
+	FuncContainsFold Func = "contains_fold" // containing case-insensitive
+	FuncHasPrefix    Func = "has_prefix"    // startingWith
+	FuncHasSuffix    Func = "has_suffix"    // endingWith
+	FuncHasEdge      Func = "has_edge"      // HasEdge
+)
+
+type (
+	// Expr represents an entql expression. All expressions implement the Expr interface.
+	Expr interface {
+		expr()
+		fmt.Stringer
+	}
+
+	// P represents an expression that returns a boolean value depending on its variables.
+	P interface {
+		Expr
+		Negate() P
+	}
+)
+
+type (
+	// A UnaryExpr represents a unary expression.
+	UnaryExpr struct {
+		Op Op
+		X  Expr
+	}
+
+	// A BinaryExpr represents a binary expression.
+	BinaryExpr struct {
+		Op   Op
+		X, Y Expr
+	}
+
+	// A NaryExpr represents an n-ary expression.
+	NaryExpr struct {
+		Op Op
+		Xs []Expr
+	}
+
+	// A CallExpr represents a function call with its arguments.
+	CallExpr struct {
+		Func Func
+		Args []Expr
+	}
+
+	// A Field represents a node field.
+	Field struct {
+		Name string
+	}
+
+	// An Edge represents an edge in the graph.
+	Edge struct {
+		Name string
+	}
+
+	// A Value represents an arbitrary value.
+	Value struct {
+		V any
+	}
+)
+
+// Not returns a predicate that represents the logical negation of the given predicate.
+func Not(x P) P {
+	return &UnaryExpr{
+		Op: OpNot,
+		X:  x,
+	}
+}
+
+// And returns a composed predicate that represents the logical AND predicate.
+func And(x, y P, z ...P) P {
+	if len(z) == 0 {
+		return &BinaryExpr{
+			Op: OpAnd,
+			X:  x,
+			Y:  y,
+		}
+	}
+	return &NaryExpr{
+		Op: OpAnd,
+		Xs: append([]Expr{x, y}, p2expr(z)...),
+	}
+}
+
+// Or returns a composed predicate that represents the logical OR predicate.
+func Or(x, y P, z ...P) P {
+	if len(z) == 0 {
+		return &BinaryExpr{
+			Op: OpOr,
+			X:  x,
+			Y:  y,
+		}
+	}
+	return &NaryExpr{
+		Op: OpOr,
+		Xs: append([]Expr{x, y}, p2expr(z)...),
+	}
+}
+
+// F returns a field expression for the given name.
+func F(name string) *Field {
+	return &Field{Name: name}
+}
+
+// EQ returns a predicate to check if the expressions are equal.
+func EQ(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpEQ,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldEQ returns a predicate to check if a field is equivalent to a given value.
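+// Field predicates compose with And, Or, and Not. A small sketch (the field
+// names are illustrative):
+//
+//	p := entql.And(
+//		entql.FieldEQ("name", "a8m"),
+//		entql.Not(entql.FieldEQ("active", false)),
+//	)
+//	fmt.Println(p) // name == "a8m" && !(active == false)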
+func FieldEQ(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpEQ,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// NEQ returns a predicate to check if the expressions are not equal.
+func NEQ(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpNEQ,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldNEQ returns a predicate to check if a field is not equivalent to a given value.
+func FieldNEQ(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpNEQ,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// GT returns a predicate to check if the expression x is greater than expression y.
+func GT(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpGT,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldGT returns a predicate to check if a field is greater than the given value.
+func FieldGT(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpGT,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// GTE returns a predicate to check if the expression x is greater than or equal to expression y.
+func GTE(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpGTE,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldGTE returns a predicate to check if a field is greater than or equal to the given value.
+func FieldGTE(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpGTE,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// LT returns a predicate to check if the expression x is less than expression y.
+func LT(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpLT,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldLT returns a predicate to check if a field is less than the given value.
+func FieldLT(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpLT,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// LTE returns a predicate to check if the expression x is less than or equal to expression y.
+func LTE(x, y Expr) P {
+	return &BinaryExpr{
+		Op: OpLTE,
+		X:  x,
+		Y:  y,
+	}
+}
+
+// FieldLTE returns a predicate to check if a field is less than or equal to the given value.
+func FieldLTE(name string, v any) P {
+	return &BinaryExpr{
+		Op: OpLTE,
+		X:  &Field{Name: name},
+		Y:  &Value{V: v},
+	}
+}
+
+// FieldContains returns a predicate to check if the field value contains a substr.
+func FieldContains(name, substr string) P {
+	return &CallExpr{
+		Func: FuncContains,
+		Args: []Expr{&Field{Name: name}, &Value{V: substr}},
+	}
+}
+
+// FieldContainsFold returns a predicate to check if the field value contains a substr under case-folding.
+func FieldContainsFold(name, substr string) P {
+	return &CallExpr{
+		Func: FuncContainsFold,
+		Args: []Expr{&Field{Name: name}, &Value{V: substr}},
+	}
+}
+
+// FieldEqualFold returns a predicate to check if the field is equal to the given string under case-folding.
+func FieldEqualFold(name, v string) P {
+	return &CallExpr{
+		Func: FuncEqualFold,
+		Args: []Expr{&Field{Name: name}, &Value{V: v}},
+	}
+}
+
+// FieldHasPrefix returns a predicate to check if the field starts with the given prefix.
+func FieldHasPrefix(name, prefix string) P {
+	return &CallExpr{
+		Func: FuncHasPrefix,
+		Args: []Expr{&Field{Name: name}, &Value{V: prefix}},
+	}
+}
+
+// FieldHasSuffix returns a predicate to check if the field ends with the given suffix.
+func FieldHasSuffix(name, suffix string) P {
+	return &CallExpr{
+		Func: FuncHasSuffix,
+		Args: []Expr{&Field{Name: name}, &Value{V: suffix}},
+	}
+}
+
+// FieldIn returns a predicate to check if the field value matches any value in the given list.
+func FieldIn(name string, vs ...any) P {
+	return &BinaryExpr{
+		Op: OpIn,
+		X:  &Field{Name: name},
+		Y:  &Value{V: vs},
+	}
+}
+
+// FieldNotIn returns a predicate to check if the field value doesn't match any value in the given list.
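+// For example, FieldNotIn("state", "closed", "archived") matches rows
+// whose state field equals neither value.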
+func FieldNotIn(name string, vs ...any) P {
+	return &BinaryExpr{
+		Op: OpNotIn,
+		X:  &Field{Name: name},
+		Y:  &Value{V: vs},
+	}
+}
+
+// FieldNil returns a predicate to check if a field is nil (null in databases).
+func FieldNil(name string) P {
+	return &BinaryExpr{
+		Op: OpEQ,
+		X:  &Field{Name: name},
+		Y:  (*Value)(nil),
+	}
+}
+
+// FieldNotNil returns a predicate to check if a field is not nil (not null in databases).
+func FieldNotNil(name string) P {
+	return &BinaryExpr{
+		Op: OpNEQ,
+		X:  &Field{Name: name},
+		Y:  (*Value)(nil),
+	}
+}
+
+// HasEdge returns a predicate to check if an edge exists (not null in databases).
+func HasEdge(name string) P {
+	return &CallExpr{
+		Func: FuncHasEdge,
+		Args: []Expr{&Edge{Name: name}},
+	}
+}
+
+// HasEdgeWith returns a predicate to check if the "other nodes" that are connected to the
+// edge match the provided predicates.
+func HasEdgeWith(name string, p ...P) P {
+	return &CallExpr{
+		Func: FuncHasEdge,
+		Args: append([]Expr{&Edge{Name: name}}, p2expr(p)...),
+	}
+}
+
+// Negate negates the predicate.
+func (e *BinaryExpr) Negate() P {
+	return Not(e)
+}
+
+// Negate negates the predicate.
+func (e *NaryExpr) Negate() P {
+	return Not(e)
+}
+
+// Negate negates the predicate.
+func (e *UnaryExpr) Negate() P {
+	return Not(e)
+}
+
+// Negate negates the predicate.
+func (e *CallExpr) Negate() P {
+	return Not(e)
+}
+
+// String returns the text representation of a binary expression.
+func (e *BinaryExpr) String() string {
+	return fmt.Sprintf("%s %s %s", e.X, e.Op, e.Y)
+}
+
+// String returns the text representation of a unary expression.
+func (e *UnaryExpr) String() string {
+	return fmt.Sprintf("%s(%s)", e.Op, e.X)
+}
+
+// String returns the text representation of an n-ary expression.
+func (e *NaryExpr) String() string {
+	var s strings.Builder
+	s.WriteByte('(')
+	for i, x := range e.Xs {
+		if i > 0 {
+			s.WriteByte(' ')
+			s.WriteString(e.Op.String())
+			s.WriteByte(' ')
+		}
+		s.WriteString(x.String())
+	}
+	s.WriteByte(')')
+	return s.String()
+}
+
+// String returns the text representation of a call expression.
+func (e *CallExpr) String() string {
+	var s strings.Builder
+	s.WriteString(string(e.Func))
+	s.WriteByte('(')
+	for i, x := range e.Args {
+		if i > 0 {
+			s.WriteString(", ")
+		}
+		s.WriteString(x.String())
+	}
+	s.WriteByte(')')
+	return s.String()
+}
+
+// String returns the text representation of a field.
+func (f *Field) String() string {
+	return f.Name
+}
+
+// String returns the text representation of an edge.
+func (e *Edge) String() string {
+	return e.Name
+}
+
+// String returns the text representation of a value.
+func (v *Value) String() string {
+	if v == nil {
+		return "nil"
+	}
+	buf, err := json.Marshal(v.V)
+	if err != nil {
+		return fmt.Sprint(v.V)
+	}
+	return string(buf)
+}
+
+func p2expr(ps []P) []Expr {
+	expr := make([]Expr, len(ps))
+	for i := range ps {
+		expr[i] = ps[i]
+	}
+	return expr
+}
+
+func (*Edge) expr()       {}
+func (*Field) expr()      {}
+func (*Value) expr()      {}
+func (*CallExpr) expr()   {}
+func (*NaryExpr) expr()   {}
+func (*UnaryExpr) expr()  {}
+func (*BinaryExpr) expr() {}
diff --git a/vendor/entgo.io/ent/entql/types.go b/vendor/entgo.io/ent/entql/types.go
new file mode 100644
index 00000000..9c132eb9
--- /dev/null
+++ b/vendor/entgo.io/ent/entql/types.go
@@ -0,0 +1,1980 @@
+// Copyright 2019-present Facebook Inc. All rights reserved.
+// This source code is licensed under the Apache 2.0 license found
+// in the LICENSE file in the root directory of this source tree.
+ +// Code generated by internal/gen.go, DO NOT EDIT. + +package entql + +import ( + "database/sql/driver" + "time" +) + +//go:generate go run internal/gen.go + +// Fielder is the interface for creating a predicate (entql.P) +// by a field name from the different builder types below. +type Fielder interface { + Field(string) P +} + +// BoolP is the interface for predicates of type bool (`type P[bool]`). +type BoolP interface { + Fielder + bool() +} + +// boolP implements the BoolP interface. +type boolP struct { + P + done func(string) +} + +func (p *boolP) Field(name string) P { + p.done(name) + return p.P +} + +func (*boolP) bool() {} + +// BoolNil applies the Nil operation +func BoolNil() BoolP { + field := &Field{} + done := func(name string) { field.Name = name } + return &boolP{P: EQ(field, (*Value)(nil)), done: done} +} + +// BoolNotNil applies the NotNil operation +func BoolNotNil() BoolP { + field := &Field{} + done := func(name string) { field.Name = name } + return &boolP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// BoolEQ applies the EQ operation on the given value. +func BoolEQ(v bool) BoolP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &boolP{P: EQ(field, value), done: done} +} + +// BoolNEQ applies the NEQ operation on the given value. +func BoolNEQ(v bool) BoolP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &boolP{P: NEQ(field, value), done: done} +} + +// BoolOr returns a composed predicate that represents the logical OR predicate. +func BoolOr(x, y BoolP, z ...BoolP) BoolP { + expr := &boolP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// BoolAnd returns a composed predicate that represents the logical AND predicate. +func BoolAnd(x, y BoolP, z ...BoolP) BoolP { + expr := &boolP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// BoolNot returns a predicate that represents the logical negation of the given predicate. +func BoolNot(x BoolP) BoolP { + expr := &boolP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// BytesP is the interface for predicates of type []byte (`type P[[]byte]`). +type BytesP interface { + Fielder + bytes() +} + +// bytesP implements the BytesP interface. +type bytesP struct { + P + done func(string) +} + +func (p *bytesP) Field(name string) P { + p.done(name) + return p.P +} + +func (*bytesP) bytes() {} + +// BytesNil applies the Nil operation +func BytesNil() BytesP { + field := &Field{} + done := func(name string) { field.Name = name } + return &bytesP{P: EQ(field, (*Value)(nil)), done: done} +} + +// BytesNotNil applies the NotNil operation +func BytesNotNil() BytesP { + field := &Field{} + done := func(name string) { field.Name = name } + return &bytesP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// BytesEQ applies the EQ operation on the given value. +func BytesEQ(v []byte) BytesP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &bytesP{P: EQ(field, value), done: done} +} + +// BytesNEQ applies the NEQ operation on the given value. 
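+// Like every typed predicate in this generated file, the returned builder
+// defers the field name: calling Field(name) completes the predicate. A
+// small sketch (the field name is illustrative):
+//
+//	p := entql.BoolEQ(true).Field("active")
+//	fmt.Println(p) // active == true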
+func BytesNEQ(v []byte) BytesP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &bytesP{P: NEQ(field, value), done: done} +} + +// BytesOr returns a composed predicate that represents the logical OR predicate. +func BytesOr(x, y BytesP, z ...BytesP) BytesP { + expr := &bytesP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// BytesAnd returns a composed predicate that represents the logical AND predicate. +func BytesAnd(x, y BytesP, z ...BytesP) BytesP { + expr := &bytesP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// BytesNot returns a predicate that represents the logical negation of the given predicate. +func BytesNot(x BytesP) BytesP { + expr := &bytesP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// TimeP is the interface for predicates of type time.Time (`type P[time.Time]`). +type TimeP interface { + Fielder + time() +} + +// timeP implements the TimeP interface. +type timeP struct { + P + done func(string) +} + +func (p *timeP) Field(name string) P { + p.done(name) + return p.P +} + +func (*timeP) time() {} + +// TimeNil applies the Nil operation +func TimeNil() TimeP { + field := &Field{} + done := func(name string) { field.Name = name } + return &timeP{P: EQ(field, (*Value)(nil)), done: done} +} + +// TimeNotNil applies the NotNil operation +func TimeNotNil() TimeP { + field := &Field{} + done := func(name string) { field.Name = name } + return &timeP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// TimeEQ applies the EQ operation on the given value. +func TimeEQ(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: EQ(field, value), done: done} +} + +// TimeNEQ applies the NEQ operation on the given value. +func TimeNEQ(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: NEQ(field, value), done: done} +} + +// TimeLT applies the LT operation on the given value. +func TimeLT(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: LT(field, value), done: done} +} + +// TimeLTE applies the LTE operation on the given value. +func TimeLTE(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: LTE(field, value), done: done} +} + +// TimeGT applies the GT operation on the given value. +func TimeGT(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: GT(field, value), done: done} +} + +// TimeGTE applies the GTE operation on the given value. +func TimeGTE(v time.Time) TimeP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &timeP{P: GTE(field, value), done: done} +} + +// TimeOr returns a composed predicate that represents the logical OR predicate. 
+func TimeOr(x, y TimeP, z ...TimeP) TimeP { + expr := &timeP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// TimeAnd returns a composed predicate that represents the logical AND predicate. +func TimeAnd(x, y TimeP, z ...TimeP) TimeP { + expr := &timeP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// TimeNot returns a predicate that represents the logical negation of the given predicate. +func TimeNot(x TimeP) TimeP { + expr := &timeP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// UintP is the interface for predicates of type uint (`type P[uint]`). +type UintP interface { + Fielder + uint() +} + +// uintP implements the UintP interface. +type uintP struct { + P + done func(string) +} + +func (p *uintP) Field(name string) P { + p.done(name) + return p.P +} + +func (*uintP) uint() {} + +// UintNil applies the Nil operation +func UintNil() UintP { + field := &Field{} + done := func(name string) { field.Name = name } + return &uintP{P: EQ(field, (*Value)(nil)), done: done} +} + +// UintNotNil applies the NotNil operation +func UintNotNil() UintP { + field := &Field{} + done := func(name string) { field.Name = name } + return &uintP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// UintEQ applies the EQ operation on the given value. +func UintEQ(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: EQ(field, value), done: done} +} + +// UintNEQ applies the NEQ operation on the given value. +func UintNEQ(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: NEQ(field, value), done: done} +} + +// UintLT applies the LT operation on the given value. +func UintLT(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: LT(field, value), done: done} +} + +// UintLTE applies the LTE operation on the given value. +func UintLTE(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: LTE(field, value), done: done} +} + +// UintGT applies the GT operation on the given value. +func UintGT(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: GT(field, value), done: done} +} + +// UintGTE applies the GTE operation on the given value. +func UintGTE(v uint) UintP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uintP{P: GTE(field, value), done: done} +} + +// UintOr returns a composed predicate that represents the logical OR predicate. +func UintOr(x, y UintP, z ...UintP) UintP { + expr := &uintP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// UintAnd returns a composed predicate that represents the logical AND predicate. 
+func UintAnd(x, y UintP, z ...UintP) UintP { + expr := &uintP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// UintNot returns a predicate that represents the logical negation of the given predicate. +func UintNot(x UintP) UintP { + expr := &uintP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Uint8P is the interface for predicates of type uint8 (`type P[uint8]`). +type Uint8P interface { + Fielder + uint8() +} + +// uint8P implements the Uint8P interface. +type uint8P struct { + P + done func(string) +} + +func (p *uint8P) Field(name string) P { + p.done(name) + return p.P +} + +func (*uint8P) uint8() {} + +// Uint8Nil applies the Nil operation +func Uint8Nil() Uint8P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint8P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Uint8NotNil applies the NotNil operation +func Uint8NotNil() Uint8P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint8P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Uint8EQ applies the EQ operation on the given value. +func Uint8EQ(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: EQ(field, value), done: done} +} + +// Uint8NEQ applies the NEQ operation on the given value. +func Uint8NEQ(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: NEQ(field, value), done: done} +} + +// Uint8LT applies the LT operation on the given value. +func Uint8LT(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: LT(field, value), done: done} +} + +// Uint8LTE applies the LTE operation on the given value. +func Uint8LTE(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: LTE(field, value), done: done} +} + +// Uint8GT applies the GT operation on the given value. +func Uint8GT(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: GT(field, value), done: done} +} + +// Uint8GTE applies the GTE operation on the given value. +func Uint8GTE(v uint8) Uint8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint8P{P: GTE(field, value), done: done} +} + +// Uint8Or returns a composed predicate that represents the logical OR predicate. +func Uint8Or(x, y Uint8P, z ...Uint8P) Uint8P { + expr := &uint8P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint8And returns a composed predicate that represents the logical AND predicate. +func Uint8And(x, y Uint8P, z ...Uint8P) Uint8P { + expr := &uint8P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint8Not returns a predicate that represents the logical negation of the given predicate. 
+func Uint8Not(x Uint8P) Uint8P { + expr := &uint8P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Uint16P is the interface for predicates of type uint16 (`type P[uint16]`). +type Uint16P interface { + Fielder + uint16() +} + +// uint16P implements the Uint16P interface. +type uint16P struct { + P + done func(string) +} + +func (p *uint16P) Field(name string) P { + p.done(name) + return p.P +} + +func (*uint16P) uint16() {} + +// Uint16Nil applies the Nil operation +func Uint16Nil() Uint16P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint16P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Uint16NotNil applies the NotNil operation +func Uint16NotNil() Uint16P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint16P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Uint16EQ applies the EQ operation on the given value. +func Uint16EQ(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: EQ(field, value), done: done} +} + +// Uint16NEQ applies the NEQ operation on the given value. +func Uint16NEQ(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: NEQ(field, value), done: done} +} + +// Uint16LT applies the LT operation on the given value. +func Uint16LT(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: LT(field, value), done: done} +} + +// Uint16LTE applies the LTE operation on the given value. +func Uint16LTE(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: LTE(field, value), done: done} +} + +// Uint16GT applies the GT operation on the given value. +func Uint16GT(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: GT(field, value), done: done} +} + +// Uint16GTE applies the GTE operation on the given value. +func Uint16GTE(v uint16) Uint16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint16P{P: GTE(field, value), done: done} +} + +// Uint16Or returns a composed predicate that represents the logical OR predicate. +func Uint16Or(x, y Uint16P, z ...Uint16P) Uint16P { + expr := &uint16P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint16And returns a composed predicate that represents the logical AND predicate. +func Uint16And(x, y Uint16P, z ...Uint16P) Uint16P { + expr := &uint16P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint16Not returns a predicate that represents the logical negation of the given predicate. +func Uint16Not(x Uint16P) Uint16P { + expr := &uint16P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Uint32P is the interface for predicates of type uint32 (`type P[uint32]`). +type Uint32P interface { + Fielder + uint32() +} + +// uint32P implements the Uint32P interface. 
+type uint32P struct { + P + done func(string) +} + +func (p *uint32P) Field(name string) P { + p.done(name) + return p.P +} + +func (*uint32P) uint32() {} + +// Uint32Nil applies the Nil operation +func Uint32Nil() Uint32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint32P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Uint32NotNil applies the NotNil operation +func Uint32NotNil() Uint32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint32P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Uint32EQ applies the EQ operation on the given value. +func Uint32EQ(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: EQ(field, value), done: done} +} + +// Uint32NEQ applies the NEQ operation on the given value. +func Uint32NEQ(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: NEQ(field, value), done: done} +} + +// Uint32LT applies the LT operation on the given value. +func Uint32LT(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: LT(field, value), done: done} +} + +// Uint32LTE applies the LTE operation on the given value. +func Uint32LTE(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: LTE(field, value), done: done} +} + +// Uint32GT applies the GT operation on the given value. +func Uint32GT(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: GT(field, value), done: done} +} + +// Uint32GTE applies the GTE operation on the given value. +func Uint32GTE(v uint32) Uint32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint32P{P: GTE(field, value), done: done} +} + +// Uint32Or returns a composed predicate that represents the logical OR predicate. +func Uint32Or(x, y Uint32P, z ...Uint32P) Uint32P { + expr := &uint32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint32And returns a composed predicate that represents the logical AND predicate. +func Uint32And(x, y Uint32P, z ...Uint32P) Uint32P { + expr := &uint32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint32Not returns a predicate that represents the logical negation of the given predicate. +func Uint32Not(x Uint32P) Uint32P { + expr := &uint32P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Uint64P is the interface for predicates of type uint64 (`type P[uint64]`). +type Uint64P interface { + Fielder + uint64() +} + +// uint64P implements the Uint64P interface. 
+type uint64P struct { + P + done func(string) +} + +func (p *uint64P) Field(name string) P { + p.done(name) + return p.P +} + +func (*uint64P) uint64() {} + +// Uint64Nil applies the Nil operation +func Uint64Nil() Uint64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint64P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Uint64NotNil applies the NotNil operation +func Uint64NotNil() Uint64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &uint64P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Uint64EQ applies the EQ operation on the given value. +func Uint64EQ(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: EQ(field, value), done: done} +} + +// Uint64NEQ applies the NEQ operation on the given value. +func Uint64NEQ(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: NEQ(field, value), done: done} +} + +// Uint64LT applies the LT operation on the given value. +func Uint64LT(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: LT(field, value), done: done} +} + +// Uint64LTE applies the LTE operation on the given value. +func Uint64LTE(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: LTE(field, value), done: done} +} + +// Uint64GT applies the GT operation on the given value. +func Uint64GT(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: GT(field, value), done: done} +} + +// Uint64GTE applies the GTE operation on the given value. +func Uint64GTE(v uint64) Uint64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &uint64P{P: GTE(field, value), done: done} +} + +// Uint64Or returns a composed predicate that represents the logical OR predicate. +func Uint64Or(x, y Uint64P, z ...Uint64P) Uint64P { + expr := &uint64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint64And returns a composed predicate that represents the logical AND predicate. +func Uint64And(x, y Uint64P, z ...Uint64P) Uint64P { + expr := &uint64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Uint64Not returns a predicate that represents the logical negation of the given predicate. +func Uint64Not(x Uint64P) Uint64P { + expr := &uint64P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// IntP is the interface for predicates of type int (`type P[int]`). +type IntP interface { + Fielder + int() +} + +// intP implements the IntP interface. 
+type intP struct { + P + done func(string) +} + +func (p *intP) Field(name string) P { + p.done(name) + return p.P +} + +func (*intP) int() {} + +// IntNil applies the Nil operation +func IntNil() IntP { + field := &Field{} + done := func(name string) { field.Name = name } + return &intP{P: EQ(field, (*Value)(nil)), done: done} +} + +// IntNotNil applies the NotNil operation +func IntNotNil() IntP { + field := &Field{} + done := func(name string) { field.Name = name } + return &intP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// IntEQ applies the EQ operation on the given value. +func IntEQ(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: EQ(field, value), done: done} +} + +// IntNEQ applies the NEQ operation on the given value. +func IntNEQ(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: NEQ(field, value), done: done} +} + +// IntLT applies the LT operation on the given value. +func IntLT(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: LT(field, value), done: done} +} + +// IntLTE applies the LTE operation on the given value. +func IntLTE(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: LTE(field, value), done: done} +} + +// IntGT applies the GT operation on the given value. +func IntGT(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: GT(field, value), done: done} +} + +// IntGTE applies the GTE operation on the given value. +func IntGTE(v int) IntP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &intP{P: GTE(field, value), done: done} +} + +// IntOr returns a composed predicate that represents the logical OR predicate. +func IntOr(x, y IntP, z ...IntP) IntP { + expr := &intP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// IntAnd returns a composed predicate that represents the logical AND predicate. +func IntAnd(x, y IntP, z ...IntP) IntP { + expr := &intP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// IntNot returns a predicate that represents the logical negation of the given predicate. +func IntNot(x IntP) IntP { + expr := &intP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Int8P is the interface for predicates of type int8 (`type P[int8]`). +type Int8P interface { + Fielder + int8() +} + +// int8P implements the Int8P interface. 
+type int8P struct { + P + done func(string) +} + +func (p *int8P) Field(name string) P { + p.done(name) + return p.P +} + +func (*int8P) int8() {} + +// Int8Nil applies the Nil operation +func Int8Nil() Int8P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int8P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Int8NotNil applies the NotNil operation +func Int8NotNil() Int8P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int8P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Int8EQ applies the EQ operation on the given value. +func Int8EQ(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: EQ(field, value), done: done} +} + +// Int8NEQ applies the NEQ operation on the given value. +func Int8NEQ(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: NEQ(field, value), done: done} +} + +// Int8LT applies the LT operation on the given value. +func Int8LT(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: LT(field, value), done: done} +} + +// Int8LTE applies the LTE operation on the given value. +func Int8LTE(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: LTE(field, value), done: done} +} + +// Int8GT applies the GT operation on the given value. +func Int8GT(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: GT(field, value), done: done} +} + +// Int8GTE applies the GTE operation on the given value. +func Int8GTE(v int8) Int8P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int8P{P: GTE(field, value), done: done} +} + +// Int8Or returns a composed predicate that represents the logical OR predicate. +func Int8Or(x, y Int8P, z ...Int8P) Int8P { + expr := &int8P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int8And returns a composed predicate that represents the logical AND predicate. +func Int8And(x, y Int8P, z ...Int8P) Int8P { + expr := &int8P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int8Not returns a predicate that represents the logical negation of the given predicate. +func Int8Not(x Int8P) Int8P { + expr := &int8P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Int16P is the interface for predicates of type int16 (`type P[int16]`). +type Int16P interface { + Fielder + int16() +} + +// int16P implements the Int16P interface. 
+type int16P struct { + P + done func(string) +} + +func (p *int16P) Field(name string) P { + p.done(name) + return p.P +} + +func (*int16P) int16() {} + +// Int16Nil applies the Nil operation +func Int16Nil() Int16P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int16P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Int16NotNil applies the NotNil operation +func Int16NotNil() Int16P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int16P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Int16EQ applies the EQ operation on the given value. +func Int16EQ(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: EQ(field, value), done: done} +} + +// Int16NEQ applies the NEQ operation on the given value. +func Int16NEQ(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: NEQ(field, value), done: done} +} + +// Int16LT applies the LT operation on the given value. +func Int16LT(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: LT(field, value), done: done} +} + +// Int16LTE applies the LTE operation on the given value. +func Int16LTE(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: LTE(field, value), done: done} +} + +// Int16GT applies the GT operation on the given value. +func Int16GT(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: GT(field, value), done: done} +} + +// Int16GTE applies the GTE operation on the given value. +func Int16GTE(v int16) Int16P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int16P{P: GTE(field, value), done: done} +} + +// Int16Or returns a composed predicate that represents the logical OR predicate. +func Int16Or(x, y Int16P, z ...Int16P) Int16P { + expr := &int16P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int16And returns a composed predicate that represents the logical AND predicate. +func Int16And(x, y Int16P, z ...Int16P) Int16P { + expr := &int16P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int16Not returns a predicate that represents the logical negation of the given predicate. +func Int16Not(x Int16P) Int16P { + expr := &int16P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Int32P is the interface for predicates of type int32 (`type P[int32]`). +type Int32P interface { + Fielder + int32() +} + +// int32P implements the Int32P interface. 
+type int32P struct { + P + done func(string) +} + +func (p *int32P) Field(name string) P { + p.done(name) + return p.P +} + +func (*int32P) int32() {} + +// Int32Nil applies the Nil operation +func Int32Nil() Int32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int32P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Int32NotNil applies the NotNil operation +func Int32NotNil() Int32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int32P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Int32EQ applies the EQ operation on the given value. +func Int32EQ(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: EQ(field, value), done: done} +} + +// Int32NEQ applies the NEQ operation on the given value. +func Int32NEQ(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: NEQ(field, value), done: done} +} + +// Int32LT applies the LT operation on the given value. +func Int32LT(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: LT(field, value), done: done} +} + +// Int32LTE applies the LTE operation on the given value. +func Int32LTE(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: LTE(field, value), done: done} +} + +// Int32GT applies the GT operation on the given value. +func Int32GT(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: GT(field, value), done: done} +} + +// Int32GTE applies the GTE operation on the given value. +func Int32GTE(v int32) Int32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int32P{P: GTE(field, value), done: done} +} + +// Int32Or returns a composed predicate that represents the logical OR predicate. +func Int32Or(x, y Int32P, z ...Int32P) Int32P { + expr := &int32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int32And returns a composed predicate that represents the logical AND predicate. +func Int32And(x, y Int32P, z ...Int32P) Int32P { + expr := &int32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int32Not returns a predicate that represents the logical negation of the given predicate. +func Int32Not(x Int32P) Int32P { + expr := &int32P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Int64P is the interface for predicates of type int64 (`type P[int64]`). +type Int64P interface { + Fielder + int64() +} + +// int64P implements the Int64P interface. 
+type int64P struct { + P + done func(string) +} + +func (p *int64P) Field(name string) P { + p.done(name) + return p.P +} + +func (*int64P) int64() {} + +// Int64Nil applies the Nil operation +func Int64Nil() Int64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int64P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Int64NotNil applies the NotNil operation +func Int64NotNil() Int64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &int64P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Int64EQ applies the EQ operation on the given value. +func Int64EQ(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: EQ(field, value), done: done} +} + +// Int64NEQ applies the NEQ operation on the given value. +func Int64NEQ(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: NEQ(field, value), done: done} +} + +// Int64LT applies the LT operation on the given value. +func Int64LT(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: LT(field, value), done: done} +} + +// Int64LTE applies the LTE operation on the given value. +func Int64LTE(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: LTE(field, value), done: done} +} + +// Int64GT applies the GT operation on the given value. +func Int64GT(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: GT(field, value), done: done} +} + +// Int64GTE applies the GTE operation on the given value. +func Int64GTE(v int64) Int64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &int64P{P: GTE(field, value), done: done} +} + +// Int64Or returns a composed predicate that represents the logical OR predicate. +func Int64Or(x, y Int64P, z ...Int64P) Int64P { + expr := &int64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int64And returns a composed predicate that represents the logical AND predicate. +func Int64And(x, y Int64P, z ...Int64P) Int64P { + expr := &int64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Int64Not returns a predicate that represents the logical negation of the given predicate. +func Int64Not(x Int64P) Int64P { + expr := &int64P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Float32P is the interface for predicates of type float32 (`type P[float32]`). +type Float32P interface { + Fielder + float32() +} + +// float32P implements the Float32P interface. 
+type float32P struct { + P + done func(string) +} + +func (p *float32P) Field(name string) P { + p.done(name) + return p.P +} + +func (*float32P) float32() {} + +// Float32Nil applies the Nil operation +func Float32Nil() Float32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &float32P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Float32NotNil applies the NotNil operation +func Float32NotNil() Float32P { + field := &Field{} + done := func(name string) { field.Name = name } + return &float32P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Float32EQ applies the EQ operation on the given value. +func Float32EQ(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: EQ(field, value), done: done} +} + +// Float32NEQ applies the NEQ operation on the given value. +func Float32NEQ(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: NEQ(field, value), done: done} +} + +// Float32LT applies the LT operation on the given value. +func Float32LT(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: LT(field, value), done: done} +} + +// Float32LTE applies the LTE operation on the given value. +func Float32LTE(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: LTE(field, value), done: done} +} + +// Float32GT applies the GT operation on the given value. +func Float32GT(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: GT(field, value), done: done} +} + +// Float32GTE applies the GTE operation on the given value. +func Float32GTE(v float32) Float32P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float32P{P: GTE(field, value), done: done} +} + +// Float32Or returns a composed predicate that represents the logical OR predicate. +func Float32Or(x, y Float32P, z ...Float32P) Float32P { + expr := &float32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Float32And returns a composed predicate that represents the logical AND predicate. +func Float32And(x, y Float32P, z ...Float32P) Float32P { + expr := &float32P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Float32Not returns a predicate that represents the logical negation of the given predicate. +func Float32Not(x Float32P) Float32P { + expr := &float32P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// Float64P is the interface for predicates of type float64 (`type P[float64]`). +type Float64P interface { + Fielder + float64() +} + +// float64P implements the Float64P interface. 
+type float64P struct { + P + done func(string) +} + +func (p *float64P) Field(name string) P { + p.done(name) + return p.P +} + +func (*float64P) float64() {} + +// Float64Nil applies the Nil operation +func Float64Nil() Float64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &float64P{P: EQ(field, (*Value)(nil)), done: done} +} + +// Float64NotNil applies the NotNil operation +func Float64NotNil() Float64P { + field := &Field{} + done := func(name string) { field.Name = name } + return &float64P{P: NEQ(field, (*Value)(nil)), done: done} +} + +// Float64EQ applies the EQ operation on the given value. +func Float64EQ(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: EQ(field, value), done: done} +} + +// Float64NEQ applies the NEQ operation on the given value. +func Float64NEQ(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: NEQ(field, value), done: done} +} + +// Float64LT applies the LT operation on the given value. +func Float64LT(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: LT(field, value), done: done} +} + +// Float64LTE applies the LTE operation on the given value. +func Float64LTE(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: LTE(field, value), done: done} +} + +// Float64GT applies the GT operation on the given value. +func Float64GT(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: GT(field, value), done: done} +} + +// Float64GTE applies the GTE operation on the given value. +func Float64GTE(v float64) Float64P { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &float64P{P: GTE(field, value), done: done} +} + +// Float64Or returns a composed predicate that represents the logical OR predicate. +func Float64Or(x, y Float64P, z ...Float64P) Float64P { + expr := &float64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Float64And returns a composed predicate that represents the logical AND predicate. +func Float64And(x, y Float64P, z ...Float64P) Float64P { + expr := &float64P{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// Float64Not returns a predicate that represents the logical negation of the given predicate. +func Float64Not(x Float64P) Float64P { + expr := &float64P{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// StringP is the interface for predicates of type string (`type P[string]`). +type StringP interface { + Fielder + string() +} + +// stringP implements the StringP interface. 
+type stringP struct { + P + done func(string) +} + +func (p *stringP) Field(name string) P { + p.done(name) + return p.P +} + +func (*stringP) string() {} + +// StringNil applies the Nil operation +func StringNil() StringP { + field := &Field{} + done := func(name string) { field.Name = name } + return &stringP{P: EQ(field, (*Value)(nil)), done: done} +} + +// StringNotNil applies the NotNil operation +func StringNotNil() StringP { + field := &Field{} + done := func(name string) { field.Name = name } + return &stringP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// StringEQ applies the EQ operation on the given value. +func StringEQ(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: EQ(field, value), done: done} +} + +// StringNEQ applies the NEQ operation on the given value. +func StringNEQ(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: NEQ(field, value), done: done} +} + +// StringLT applies the LT operation on the given value. +func StringLT(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: LT(field, value), done: done} +} + +// StringLTE applies the LTE operation on the given value. +func StringLTE(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: LTE(field, value), done: done} +} + +// StringGT applies the GT operation on the given value. +func StringGT(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: GT(field, value), done: done} +} + +// StringGTE applies the GTE operation on the given value. +func StringGTE(v string) StringP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &stringP{P: GTE(field, value), done: done} +} + +// StringOr returns a composed predicate that represents the logical OR predicate. +func StringOr(x, y StringP, z ...StringP) StringP { + expr := &stringP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// StringAnd returns a composed predicate that represents the logical AND predicate. +func StringAnd(x, y StringP, z ...StringP) StringP { + expr := &stringP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// StringNot returns a predicate that represents the logical negation of the given predicate. +func StringNot(x StringP) StringP { + expr := &stringP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// ValueP is the interface for predicates of type [16]byte (`type P[[16]byte]`). +type ValueP interface { + Fielder + value() +} + +// valueP implements the ValueP interface. 
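The Value* constructors that follow accept any driver.Valuer rather than a concrete Go type. A sketch using database/sql's NullString, which implements driver.Valuer (the field name "nickname" is an illustrative assumption):

package main

import (
	"database/sql"
	"fmt"

	"entgo.io/ent/entql"
)

func main() {
	// sql.NullString implements driver.Valuer, so it works with the
	// generated Value* predicates; ValueNil matches NULL columns.
	p := entql.ValueOr(
		entql.ValueEQ(sql.NullString{String: "neo", Valid: true}),
		entql.ValueNil(),
	)
	fmt.Println(p.Field("nickname"))
}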
+type valueP struct { + P + done func(string) +} + +func (p *valueP) Field(name string) P { + p.done(name) + return p.P +} + +func (*valueP) value() {} + +// ValueNil applies the Nil operation +func ValueNil() ValueP { + field := &Field{} + done := func(name string) { field.Name = name } + return &valueP{P: EQ(field, (*Value)(nil)), done: done} +} + +// ValueNotNil applies the NotNil operation +func ValueNotNil() ValueP { + field := &Field{} + done := func(name string) { field.Name = name } + return &valueP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// ValueEQ applies the EQ operation on the given value. +func ValueEQ(v driver.Valuer) ValueP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &valueP{P: EQ(field, value), done: done} +} + +// ValueNEQ applies the NEQ operation on the given value. +func ValueNEQ(v driver.Valuer) ValueP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &valueP{P: NEQ(field, value), done: done} +} + +// ValueOr returns a composed predicate that represents the logical OR predicate. +func ValueOr(x, y ValueP, z ...ValueP) ValueP { + expr := &valueP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// ValueAnd returns a composed predicate that represents the logical AND predicate. +func ValueAnd(x, y ValueP, z ...ValueP) ValueP { + expr := &valueP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// ValueNot returns a predicate that represents the logical negation of the given predicate. +func ValueNot(x ValueP) ValueP { + expr := &valueP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} + +// OtherP is the interface for predicates of type other (`type P[other]`). +type OtherP interface { + Fielder + other() +} + +// otherP implements the OtherP interface. +type otherP struct { + P + done func(string) +} + +func (p *otherP) Field(name string) P { + p.done(name) + return p.P +} + +func (*otherP) other() {} + +// OtherNil applies the Nil operation +func OtherNil() OtherP { + field := &Field{} + done := func(name string) { field.Name = name } + return &otherP{P: EQ(field, (*Value)(nil)), done: done} +} + +// OtherNotNil applies the NotNil operation +func OtherNotNil() OtherP { + field := &Field{} + done := func(name string) { field.Name = name } + return &otherP{P: NEQ(field, (*Value)(nil)), done: done} +} + +// OtherEQ applies the EQ operation on the given value. +func OtherEQ(v driver.Valuer) OtherP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &otherP{P: EQ(field, value), done: done} +} + +// OtherNEQ applies the NEQ operation on the given value. +func OtherNEQ(v driver.Valuer) OtherP { + field := &Field{} + value := &Value{V: v} + done := func(name string) { field.Name = name } + return &otherP{P: NEQ(field, value), done: done} +} + +// OtherOr returns a composed predicate that represents the logical OR predicate. +func OtherOr(x, y OtherP, z ...OtherP) OtherP { + expr := &otherP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = Or(x.Field(name), y.Field(name), zs...) 
+ } + return expr +} + +// OtherAnd returns a composed predicate that represents the logical AND predicate. +func OtherAnd(x, y OtherP, z ...OtherP) OtherP { + expr := &otherP{} + expr.done = func(name string) { + zs := make([]P, len(z)) + for i := range z { + zs[i] = z[i].Field(name) + } + expr.P = And(x.Field(name), y.Field(name), zs...) + } + return expr +} + +// OtherNot returns a predicate that represents the logical negation of the given predicate. +func OtherNot(x OtherP) OtherP { + expr := &otherP{} + expr.done = func(name string) { + expr.P = Not(x.Field(name)) + } + return expr +} diff --git a/vendor/entgo.io/ent/op_string.go b/vendor/entgo.io/ent/op_string.go new file mode 100644 index 00000000..b8bf9da4 --- /dev/null +++ b/vendor/entgo.io/ent/op_string.go @@ -0,0 +1,43 @@ +// Code generated by "stringer -type Op"; DO NOT EDIT. + +package ent + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OpCreate-1] + _ = x[OpUpdate-2] + _ = x[OpUpdateOne-4] + _ = x[OpDelete-8] + _ = x[OpDeleteOne-16] +} + +const ( + _Op_name_0 = "OpCreateOpUpdate" + _Op_name_1 = "OpUpdateOne" + _Op_name_2 = "OpDelete" + _Op_name_3 = "OpDeleteOne" +) + +var ( + _Op_index_0 = [...]uint8{0, 8, 16} +) + +func (i Op) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _Op_name_0[_Op_index_0[i]:_Op_index_0[i+1]] + case i == 4: + return _Op_name_1 + case i == 8: + return _Op_name_2 + case i == 16: + return _Op_name_3 + default: + return "Op(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/entgo.io/ent/schema/BUILD b/vendor/entgo.io/ent/schema/BUILD new file mode 100644 index 00000000..be26dcfb --- /dev/null +++ b/vendor/entgo.io/ent/schema/BUILD @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "schema", + srcs = ["schema.go"], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/schema", + importpath = "entgo.io/ent/schema", + visibility = ["//visibility:public"], +) diff --git a/vendor/entgo.io/ent/schema/edge/BUILD b/vendor/entgo.io/ent/schema/edge/BUILD new file mode 100644 index 00000000..6f611796 --- /dev/null +++ b/vendor/entgo.io/ent/schema/edge/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "edge", + srcs = [ + "annotation.go", + "edge.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/schema/edge", + importpath = "entgo.io/ent/schema/edge", + visibility = ["//visibility:public"], + deps = ["//vendor/entgo.io/ent/schema"], +) diff --git a/vendor/entgo.io/ent/schema/edge/annotation.go b/vendor/entgo.io/ent/schema/edge/annotation.go new file mode 100644 index 00000000..9be39603 --- /dev/null +++ b/vendor/entgo.io/ent/schema/edge/annotation.go @@ -0,0 +1,49 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package edge + +import "entgo.io/ent/schema" + +// Annotation is a builtin schema annotation for +// configuring the edges' behavior in codegen. +type Annotation struct { + // The StructTag option allows overriding the struct-tag + // of the `Edges` field in the generated entity. For example: + // + // edge.Annotation{ + // StructTag: `json:"pet_edges"` + // } + // + StructTag string +} + +// Name describes the annotation name. 
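The Op constants referenced by the stringer in op_string.go above are single-bit flags (1, 2, 4, 8, 16, per the index assertions), so only individual operations have names; a combined mask falls through to the default branch. A small sketch:

package main

import (
	"fmt"

	"entgo.io/ent"
)

func main() {
	fmt.Println(ent.OpUpdateOne) // OpUpdateOne
	// A bitwise combination has no name of its own and is rendered
	// by the default case of String.
	fmt.Println(ent.OpCreate | ent.OpUpdate) // Op(3)
}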
+func (Annotation) Name() string { + return "Edges" +} + +// Merge implements the schema.Merger interface. +func (a Annotation) Merge(other schema.Annotation) schema.Annotation { + var ant Annotation + switch other := other.(type) { + case Annotation: + ant = other + case *Annotation: + if other != nil { + ant = *other + } + default: + return a + } + if tag := ant.StructTag; tag != "" { + a.StructTag = tag + } + return a +} + +var ( + _ schema.Annotation = (*Annotation)(nil) + _ schema.Merger = (*Annotation)(nil) +) diff --git a/vendor/entgo.io/ent/schema/edge/edge.go b/vendor/entgo.io/ent/schema/edge/edge.go new file mode 100644 index 00000000..26aad212 --- /dev/null +++ b/vendor/entgo.io/ent/schema/edge/edge.go @@ -0,0 +1,276 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package edge + +import ( + "reflect" + + "entgo.io/ent/schema" +) + +// A Descriptor for edge configuration. +type Descriptor struct { + Tag string // struct tag. + Type string // edge type. + Name string // edge name. + Field string // edge field name (e.g. foreign-key). + RefName string // ref name; inverse only. + Ref *Descriptor // edge reference; to/from of the same type. + Through *struct{ N, T string } // through type and name. + Unique bool // unique edge. + Inverse bool // inverse edge. + Required bool // required on creation. + Immutable bool // create only edge. + StorageKey *StorageKey // optional storage-key configuration. + Annotations []schema.Annotation // edge annotations. + Comment string // edge comment. +} + +// To defines an association edge between two vertices. +func To(name string, t any) *assocBuilder { + return &assocBuilder{desc: &Descriptor{Name: name, Type: typ(t)}} +} + +// From represents a reversed-edge between two vertices that has a back-reference to its source edge. +func From(name string, t any) *inverseBuilder { + return &inverseBuilder{desc: &Descriptor{Name: name, Type: typ(t), Inverse: true}} +} + +func typ(t any) string { + if rt := reflect.TypeOf(t); rt.NumIn() > 0 { + return rt.In(0).Name() + } + return "" +} + +// assocBuilder is the builder for assoc edges. +type assocBuilder struct { + desc *Descriptor +} + +// Unique sets the edge type to be unique. Basically, it limits the edge to be one of the two: +// one2one or one2many. one2one applied if the inverse-edge is also unique. +func (b *assocBuilder) Unique() *assocBuilder { + b.desc.Unique = true + return b +} + +// Required indicates that this edge is a required field on creation. +// Unlike fields, edges are optional by default. +func (b *assocBuilder) Required() *assocBuilder { + b.desc.Required = true + return b +} + +// Immutable indicates that this edge cannot be updated. +func (b *assocBuilder) Immutable() *assocBuilder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the assoc edge. +func (b *assocBuilder) StructTag(s string) *assocBuilder { + b.desc.Tag = s + return b +} + +// From creates an inverse-edge with the same type. +func (b *assocBuilder) From(name string) *inverseBuilder { + return &inverseBuilder{desc: &Descriptor{Name: name, Type: b.desc.Type, Inverse: true, Ref: b.desc}} +} + +// Field is used to bind an edge (with a foreign-key) to a field in the schema. +// +// field.Int("owner_id"). +// Optional() +// +// edge.To("owner", User.Type). +// Field("owner_id"). 
+//		Unique(),
+func (b *assocBuilder) Field(f string) *assocBuilder {
+	b.desc.Field = f
+	return b
+}
+
+// Through allows setting an "edge schema" to interact explicitly with M2M edges.
+//
+//	edge.To("friends", User.Type).
+//		Through("friendships", Friendship.Type)
+func (b *assocBuilder) Through(name string, t any) *assocBuilder {
+	b.desc.Through = &struct{ N, T string }{N: name, T: typ(t)}
+	return b
+}
+
+// Comment sets the comment of the edge.
+func (b *assocBuilder) Comment(c string) *assocBuilder {
+	b.desc.Comment = c
+	return b
+}
+
+// StorageKey sets the storage key of the edge.
+//
+//	edge.To("groups", Group.Type).
+//		StorageKey(edge.Table("user_groups"), edge.Columns("user_id", "group_id"))
+func (b *assocBuilder) StorageKey(opts ...StorageOption) *assocBuilder {
+	if b.desc.StorageKey == nil {
+		b.desc.StorageKey = &StorageKey{}
+	}
+	for i := range opts {
+		opts[i](b.desc.StorageKey)
+	}
+	return b
+}
+
+// Annotations adds a list of annotations to the edge object to be used by
+// codegen extensions.
+//
+//	edge.To("pets", Pet.Type).
+//		Annotations(entgql.Bind())
+func (b *assocBuilder) Annotations(annotations ...schema.Annotation) *assocBuilder {
+	b.desc.Annotations = append(b.desc.Annotations, annotations...)
+	return b
+}
+
+// Descriptor implements the ent.Descriptor interface.
+func (b *assocBuilder) Descriptor() *Descriptor {
+	return b.desc
+}
+
+// inverseBuilder is the builder for inverse edges.
+type inverseBuilder struct {
+	desc *Descriptor
+}
+
+// Ref sets the referenced-edge of this inverse edge.
+func (b *inverseBuilder) Ref(ref string) *inverseBuilder {
+	b.desc.RefName = ref
+	return b
+}
+
+// Unique sets the edge type to be unique. Basically, it limits the edge to be one of the two:
+// one-2-one or one-2-many. one-2-one is applied if the inverse-edge is also unique.
+func (b *inverseBuilder) Unique() *inverseBuilder {
+	b.desc.Unique = true
+	return b
+}
+
+// Required indicates that this edge is a required field on creation.
+// Unlike fields, edges are optional by default.
+func (b *inverseBuilder) Required() *inverseBuilder {
+	b.desc.Required = true
+	return b
+}
+
+// Immutable indicates that this edge cannot be updated.
+func (b *inverseBuilder) Immutable() *inverseBuilder {
+	b.desc.Immutable = true
+	return b
+}
+
+// StructTag sets the struct tag of the inverse edge.
+func (b *inverseBuilder) StructTag(s string) *inverseBuilder {
+	b.desc.Tag = s
+	return b
+}
+
+// Comment sets the comment of the edge.
+func (b *inverseBuilder) Comment(c string) *inverseBuilder {
+	b.desc.Comment = c
+	return b
+}
+
+// Field is used to bind an edge (with a foreign-key) to a field in the schema.
+//
+//	field.Int("owner_id").
+//		Optional()
+//
+//	edge.From("owner", User.Type).
+//		Ref("pets").
+//		Field("owner_id").
+//		Unique(),
+func (b *inverseBuilder) Field(f string) *inverseBuilder {
+	b.desc.Field = f
+	return b
+}
+
+// Through allows setting an "edge schema" to interact explicitly with M2M edges.
+//
+//	edge.From("liked_users", User.Type).
+//		Ref("liked_tweets").
+//		Through("likes", TweetLike.Type)
+func (b *inverseBuilder) Through(name string, t any) *inverseBuilder {
+	b.desc.Through = &struct{ N, T string }{N: name, T: typ(t)}
+	return b
+}
+
+// Annotations adds a list of annotations to the edge object to be used by
+// codegen extensions.
+//
+//	edge.From("owner", User.Type).
+//		Ref("pets").
+//		Unique().
+// Annotations(entgql.Bind()) +func (b *inverseBuilder) Annotations(annotations ...schema.Annotation) *inverseBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Descriptor interface. +func (b *inverseBuilder) Descriptor() *Descriptor { + return b.desc +} + +// StorageKey holds the configuration for edge storage-key. +type StorageKey struct { + Table string // Table or label. + Symbols []string // Symbols/names of the foreign-key constraints. + Columns []string // Foreign-key columns. +} + +// StorageOption allows for setting the storage configuration using functional options. +type StorageOption func(*StorageKey) + +// Table sets the table name option for M2M edges. +func Table(name string) StorageOption { + return func(key *StorageKey) { + key.Table = name + } +} + +// Symbol sets the symbol/name of the foreign-key constraint for O2O, O2M and M2O edges. +// Note that, for M2M edges (2 columns and 2 constraints), use the edge.Symbols option. +func Symbol(symbol string) StorageOption { + return func(key *StorageKey) { + key.Symbols = []string{symbol} + } +} + +// Symbols sets the symbol/name of the foreign-key constraints for M2M edges. +// The 1st column defines the name of the "To" edge, and the 2nd defines +// the name of the "From" edge (inverse edge). +// Note that, for O2O, O2M and M2O edges, use the edge.Symbol option. +func Symbols(to, from string) StorageOption { + return func(key *StorageKey) { + key.Symbols = []string{to, from} + } +} + +// Column sets the foreign-key column name option for O2O, O2M and M2O edges. +// Note that, for M2M edges (2 columns), use the edge.Columns option. +func Column(name string) StorageOption { + return func(key *StorageKey) { + key.Columns = []string{name} + } +} + +// Columns sets the foreign-key column names option for M2M edges. +// The 1st column defines the name of the "To" edge, and the 2nd defines +// the name of the "From" edge (inverse edge). +// Note that, for O2O, O2M and M2O edges, use the edge.Column option. +func Columns(to, from string) StorageOption { + return func(key *StorageKey) { + key.Columns = []string{to, from} + } +} diff --git a/vendor/entgo.io/ent/schema/field/BUILD b/vendor/entgo.io/ent/schema/field/BUILD new file mode 100644 index 00000000..f42c88d7 --- /dev/null +++ b/vendor/entgo.io/ent/schema/field/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "field", + srcs = [ + "annotation.go", + "field.go", + "numeric.go", + "type.go", + ], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/schema/field", + importpath = "entgo.io/ent/schema/field", + visibility = ["//visibility:public"], + deps = ["//vendor/entgo.io/ent/schema"], +) diff --git a/vendor/entgo.io/ent/schema/field/annotation.go b/vendor/entgo.io/ent/schema/field/annotation.go new file mode 100644 index 00000000..91aeb885 --- /dev/null +++ b/vendor/entgo.io/ent/schema/field/annotation.go @@ -0,0 +1,81 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package field + +import "entgo.io/ent/schema" + +// Annotation is a builtin schema annotation for +// configuring the schema fields in codegen. +type Annotation struct { + // The StructTag option allows overriding the struct-tag + // of the fields in the generated entity. 
For example: + // + // field.Annotation{ + // StructTag: map[string]string{ + // "id": `json:"id,omitempty" yaml:"-"`, + // }, + // } + // + StructTag map[string]string + + // ID defines a multi-field schema identifier. Note, + // the annotation is valid only for edge schemas. + // + // func (TweetLike) Annotations() []schema.Annotation { + // return []schema.Annotation{ + // field.ID("user_id", "tweet_id"), + // } + // } + // + ID []string +} + +// ID defines a multi-field schema identifier. Note, the +// annotation is valid only for edge schemas. +// +// func (TweetLike) Annotations() []schema.Annotation { +// return []schema.Annotation{ +// field.ID("user_id", "tweet_id"), +// } +// } +// +func ID(first, second string, fields ...string) *Annotation { + return &Annotation{ID: append([]string{first, second}, fields...)} +} + +// Name describes the annotation name. +func (Annotation) Name() string { + return "Fields" +} + +// Merge implements the schema.Merger interface. +func (a Annotation) Merge(other schema.Annotation) schema.Annotation { + var ant Annotation + switch other := other.(type) { + case Annotation: + ant = other + case *Annotation: + if other != nil { + ant = *other + } + default: + return a + } + if a.StructTag == nil && len(ant.StructTag) > 0 { + a.StructTag = make(map[string]string, len(ant.StructTag)) + } + for k, v := range ant.StructTag { + a.StructTag[k] = v + } + if len(ant.ID) > 0 { + a.ID = ant.ID + } + return a +} + +var _ interface { + schema.Annotation + schema.Merger +} = (*Annotation)(nil) diff --git a/vendor/entgo.io/ent/schema/field/field.go b/vendor/entgo.io/ent/schema/field/field.go new file mode 100644 index 00000000..fc4e16bb --- /dev/null +++ b/vendor/entgo.io/ent/schema/field/field.go @@ -0,0 +1,1457 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package field + +import ( + "database/sql" + "database/sql/driver" + "encoding" + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strings" + "time" + + "entgo.io/ent/schema" +) + +// String returns a new Field with type string. +func String(name string) *stringBuilder { + return &stringBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeString}, + }} +} + +// Text returns a new string field without limitation on the size. +// In MySQL, it is the "longtext" type, but in SQLite and Gremlin it has no effect. +func Text(name string) *stringBuilder { + return &stringBuilder{&Descriptor{ + Name: name, + Size: math.MaxInt32, + Info: &TypeInfo{Type: TypeString}, + }} +} + +// Bytes returns a new Field with type bytes/buffer. +// In MySQL and SQLite, it is the "BLOB" type, and it does not support for Gremlin. +func Bytes(name string) *bytesBuilder { + return &bytesBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeBytes, Nillable: true}, + }} +} + +// Bool returns a new Field with type bool. +func Bool(name string) *boolBuilder { + return &boolBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeBool}, + }} +} + +// Time returns a new Field with type timestamp. +func Time(name string) *timeBuilder { + return &timeBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeTime, PkgPath: "time"}, + }} +} + +// JSON returns a new Field with type json that is serialized to the given object. +// For example: +// +// field.JSON("dirs", []http.Dir{}). +// Optional() +// +// +// field.JSON("info", &Info{}). 
+// Optional() +func JSON(name string, typ any) *jsonBuilder { + b := &jsonBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{ + Type: TypeJSON, + }, + }} + t := reflect.TypeOf(typ) + if t == nil { + b.desc.Err = errors.New("expect a Go value as JSON type but got nil") + return b + } + b.desc.Info.Ident = t.String() + b.desc.Info.PkgPath = t.PkgPath() + b.desc.goType(typ) + b.desc.checkGoType(t) + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + b.desc.Info.Nillable = true + b.desc.Info.PkgPath = pkgPath(t) + } + return b +} + +// Strings returns a new JSON Field with type []string. +func Strings(name string) *jsonBuilder { + return JSON(name, []string{}) +} + +// Ints returns a new JSON Field with type []int. +func Ints(name string) *jsonBuilder { + return JSON(name, []int{}) +} + +// Floats returns a new JSON Field with type []float. +func Floats(name string) *jsonBuilder { + return JSON(name, []float64{}) +} + +// Any returns a new JSON Field with type any. Although this field type can be +// useful for fields with dynamic data layout, it is strongly recommended to use +// JSON with json.RawMessage instead and implement custom marshaling. +func Any(name string) *jsonBuilder { + const t = "any" + return &jsonBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{ + Type: TypeJSON, + Ident: t, + Nillable: true, + RType: &RType{ + Name: t, + Ident: t, + Kind: reflect.Interface, + }, + }, + }} +} + +// Enum returns a new Field with type enum. An example for defining enum is as follows: +// +// field.Enum("state"). +// Values( +// "on", +// "off", +// ). +// Default("on") +func Enum(name string) *enumBuilder { + return &enumBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeEnum}, + }} +} + +// UUID returns a new Field with type UUID. An example for defining UUID field is as follows: +// +// field.UUID("id", uuid.New()) +func UUID(name string, typ driver.Valuer) *uuidBuilder { + rt := reflect.TypeOf(typ) + b := &uuidBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{ + Type: TypeUUID, + Ident: rt.String(), + PkgPath: indirect(rt).PkgPath(), + }, + }} + b.desc.goType(typ) + return b +} + +// Other represents a field that is not a good fit for any of the standard field types. +// +// The second argument defines the GoType and must implement the ValueScanner interface. +// The SchemaType option must be set because the field type cannot be inferred. +// An example for defining Other field is as follows: +// +// field.Other("link", &Link{}). +// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }) +func Other(name string, typ driver.Valuer) *otherBuilder { + ob := &otherBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeOther}, + }} + ob.desc.goType(typ) + return ob +} + +// stringBuilder is the builder for string fields. +type stringBuilder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *stringBuilder) Unique() *stringBuilder { + b.desc.Unique = true + return b +} + +// Sensitive fields not printable and not serializable. +func (b *stringBuilder) Sensitive() *stringBuilder { + b.desc.Sensitive = true + return b +} + +// Match adds a regex matcher for this field. Operation fails if the regex fails. 
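Taken together, the field constructors above and the edge builders earlier in this patch are consumed from schema definitions. A hypothetical two-schema sketch (User, Pet, and every field and edge name here are illustrative assumptions):

package schema

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
)

// User is a hypothetical schema wiring several builders together.
type User struct {
	ent.Schema
}

func (User) Fields() []ent.Field {
	return []ent.Field{
		field.String("name").NotEmpty().Unique(),
		field.Enum("state").Values("on", "off").Default("on"),
		field.JSON("tags", []string{}).Optional(),
		field.Time("created_at").Default(time.Now).Immutable(),
	}
}

func (User) Edges() []ent.Edge {
	return []ent.Edge{
		// O2M: a user owns many pets.
		edge.To("pets", Pet.Type),
	}
}

// Pet holds the inverse side of the edge.
type Pet struct {
	ent.Schema
}

func (Pet) Edges() []ent.Edge {
	return []ent.Edge{
		// M2O back-reference; Unique limits each pet to one owner.
		edge.From("owner", User.Type).Ref("pets").Unique(),
	}
}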
+func (b *stringBuilder) Match(re *regexp.Regexp) *stringBuilder { + b.desc.Validators = append(b.desc.Validators, func(v string) error { + if !re.MatchString(v) { + return errors.New("value does not match validation") + } + return nil + }) + return b +} + +// MinLen adds a length validator for this field. +// Operation fails if the length of the string is less than the given value. +func (b *stringBuilder) MinLen(i int) *stringBuilder { + b.desc.Validators = append(b.desc.Validators, func(v string) error { + if len(v) < i { + return errors.New("value is less than the required length") + } + return nil + }) + return b +} + +// NotEmpty adds a length validator for this field. +// Operation fails if the length of the string is zero. +func (b *stringBuilder) NotEmpty() *stringBuilder { + return b.MinLen(1) +} + +// MaxLen adds a length validator for this field. +// Operation fails if the length of the string is greater than the given value. +func (b *stringBuilder) MaxLen(i int) *stringBuilder { + b.desc.Size = i + b.desc.Validators = append(b.desc.Validators, func(v string) error { + if len(v) > i { + return errors.New("value is greater than the required length") + } + return nil + }) + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *stringBuilder) Validate(fn func(string) error) *stringBuilder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// Default sets the default value of the field. +func (b *stringBuilder) Default(s string) *stringBuilder { + b.desc.Default = s + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. For example: +// +// field.String("cuid"). +// DefaultFunc(cuid.New) +func (b *stringBuilder) DefaultFunc(fn any) *stringBuilder { + if t := reflect.TypeOf(fn); t.Kind() != reflect.Func { + b.desc.Err = fmt.Errorf("field.String(%q).DefaultFunc expects func but got %s", b.desc.Name, t.Kind()) + } + b.desc.Default = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *stringBuilder) Nillable() *stringBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *stringBuilder) Optional() *stringBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *stringBuilder) Immutable() *stringBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *stringBuilder) Comment(c string) *stringBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *stringBuilder) StructTag(s string) *stringBuilder { + b.desc.Tag = s + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *stringBuilder) StorageKey(key string) *stringBuilder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for string. +// +// field.String("name"). 
+// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }) +func (b *stringBuilder) SchemaType(types map[string]string) *stringBuilder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.String("dir"). +// GoType(http.Dir("dir")) +func (b *stringBuilder) GoType(typ any) *stringBuilder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces, such as slices and maps +// or types exist in external packages (e.g., url.URL). +func (b *stringBuilder) ValueScanner(vs any) *stringBuilder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.String("dir"). +// Annotations( +// entgql.OrderField("DIR"), +// ) +func (b *stringBuilder) Annotations(annotations ...schema.Annotation) *stringBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *stringBuilder) Descriptor() *Descriptor { + if b.desc.Default != nil { + b.desc.checkDefaultFunc(stringType) + } + b.desc.checkGoType(stringType) + return b.desc +} + +// timeBuilder is the builder for time fields. +type timeBuilder struct { + desc *Descriptor +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *timeBuilder) Nillable() *timeBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *timeBuilder) Optional() *timeBuilder { + b.desc.Optional = true + return b +} + +// Immutable fields are fields that can be set only in the creation of the entity. +// i.e., no setters will be generated for the entity updaters (one and many). +func (b *timeBuilder) Immutable() *timeBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *timeBuilder) Comment(c string) *timeBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *timeBuilder) StructTag(s string) *timeBuilder { + b.desc.Tag = s + return b +} + +// Default sets the function that is applied to set default value +// of the field on creation. For example: +// +// field.Time("created_at"). +// Default(time.Now) +func (b *timeBuilder) Default(fn any) *timeBuilder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Time("updated_at"). +// Default(time.Now). +// UpdateDefault(time.Now), +// +// field.Time("deleted_at"). +// Optional(). +// GoType(&sql.NullTime{}). +// UpdateDefault(NewNullTime), +func (b *timeBuilder) UpdateDefault(fn any) *timeBuilder { + b.desc.UpdateDefault = fn + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. 
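The Default/UpdateDefault pair documented above is the usual timestamp bookkeeping pattern. A minimal sketch (the Audit schema and field names are illustrative assumptions):

package schema

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/schema/field"
)

// Audit is a hypothetical schema: created_at is set once on creation,
// while updated_at is refreshed by UpdateDefault on every update.
type Audit struct {
	ent.Schema
}

func (Audit) Fields() []ent.Field {
	return []ent.Field{
		field.Time("created_at").Default(time.Now).Immutable(),
		field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now),
	}
}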
+func (b *timeBuilder) StorageKey(key string) *timeBuilder { + b.desc.StorageKey = key + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Time("deleted_at"). +// GoType(&sql.NullTime{}) +func (b *timeBuilder) GoType(typ any) *timeBuilder { + b.desc.goType(typ) + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Time("deleted_at"). +// Annotations( +// entgql.OrderField("DELETED_AT"), +// ) +func (b *timeBuilder) Annotations(annotations ...schema.Annotation) *timeBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *timeBuilder) Descriptor() *Descriptor { + if b.desc.Default != nil { + b.desc.checkDefaultFunc(timeType) + } + b.desc.checkGoType(timeType) + return b.desc +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for time. +// +// field.Time("created_at"). +// SchemaType(map[string]string{ +// dialect.MySQL: "datetime", +// dialect.Postgres: "time with time zone", +// }) +func (b *timeBuilder) SchemaType(types map[string]string) *timeBuilder { + b.desc.SchemaType = types + return b +} + +// boolBuilder is the builder for boolean fields. +type boolBuilder struct { + desc *Descriptor +} + +// Default sets the default value of the field. +func (b *boolBuilder) Default(v bool) *boolBuilder { + b.desc.Default = v + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *boolBuilder) Nillable() *boolBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *boolBuilder) Optional() *boolBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *boolBuilder) Immutable() *boolBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *boolBuilder) Comment(c string) *boolBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *boolBuilder) StructTag(s string) *boolBuilder { + b.desc.Tag = s + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *boolBuilder) StorageKey(key string) *boolBuilder { + b.desc.StorageKey = key + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Bool("deleted"). +// GoType(&sql.NullBool{}) +func (b *boolBuilder) GoType(typ any) *boolBuilder { + b.desc.goType(typ) + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Bool("deleted"). +// Annotations( +// entgql.OrderField("DELETED"), +// ) +func (b *boolBuilder) Annotations(annotations ...schema.Annotation) *boolBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. 
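GoType, as documented above, swaps the generated Go type; a common use is a nullable column backed by a SQL null wrapper. A sketch under that assumption (the field name is illustrative):

package schema

import (
	"database/sql"

	"entgo.io/ent/schema/field"
)

// deletedAt sketches a nullable timestamp: Optional makes the column
// nullable in the database, and sql.NullTime carries the NULL state in Go.
var deletedAt = field.Time("deleted_at").
	Optional().
	GoType(&sql.NullTime{})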
+func (b *boolBuilder) Descriptor() *Descriptor { + b.desc.checkGoType(boolType) + return b.desc +} + +// bytesBuilder is the builder for bytes fields. +type bytesBuilder struct { + desc *Descriptor +} + +// Default sets the default value of the field. +func (b *bytesBuilder) Default(v []byte) *bytesBuilder { + b.desc.Default = v + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. For example: +// +// field.Bytes("cuid"). +// DefaultFunc(cuid.New) +func (b *bytesBuilder) DefaultFunc(fn any) *bytesBuilder { + if t := reflect.TypeOf(fn); t.Kind() != reflect.Func { + b.desc.Err = fmt.Errorf("field.Bytes(%q).DefaultFunc expects func but got %s", b.desc.Name, t.Kind()) + } + b.desc.Default = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *bytesBuilder) Nillable() *bytesBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *bytesBuilder) Optional() *bytesBuilder { + b.desc.Optional = true + return b +} + +// Sensitive fields not printable and not serializable. +func (b *bytesBuilder) Sensitive() *bytesBuilder { + b.desc.Sensitive = true + return b +} + +// Unique makes the field unique within all vertices of this type. +// Only supported in PostgreSQL. +func (b *bytesBuilder) Unique() *bytesBuilder { + b.desc.Unique = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *bytesBuilder) Immutable() *bytesBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *bytesBuilder) Comment(c string) *bytesBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *bytesBuilder) StructTag(s string) *bytesBuilder { + b.desc.Tag = s + return b +} + +// MaxLen sets the max-length of the bytes type in the database. +// In MySQL, this affects the BLOB type (tiny 2^8-1, regular 2^16-1, medium 2^24-1, long 2^32-1). +// In SQLite, it does not have any effect on the type size, which is default to 1B bytes. +func (b *bytesBuilder) MaxLen(i int) *bytesBuilder { + b.desc.Size = i + b.desc.Validators = append(b.desc.Validators, func(buf []byte) error { + if len(buf) > i { + return errors.New("value is greater than the required length") + } + return nil + }) + return b +} + +// MinLen adds a length validator for this field. +// Operation fails if the length of the buffer is less than the given value. +func (b *bytesBuilder) MinLen(i int) *bytesBuilder { + b.desc.Validators = append(b.desc.Validators, func(b []byte) error { + if len(b) < i { + return errors.New("value is less than the required length") + } + return nil + }) + return b +} + +// NotEmpty adds a length validator for this field. +// Operation fails if the length of the buffer is zero. +func (b *bytesBuilder) NotEmpty() *bytesBuilder { + return b.MinLen(1) +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +// +// field.Bytes("blob"). +// Validate(func(b []byte) error { +// if len(b) % 2 == 0 { +// return fmt.Errorf("ent/schema: blob length is even: %d", len(b)) +// } +// return nil +// }) +func (b *bytesBuilder) Validate(fn func([]byte) error) *bytesBuilder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. 
+// In SQL dialects is the column name and Gremlin is the property. +func (b *bytesBuilder) StorageKey(key string) *bytesBuilder { + b.desc.StorageKey = key + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Bytes("ip"). +// GoType(net.IP("127.0.0.1")) +func (b *bytesBuilder) GoType(typ any) *bytesBuilder { + b.desc.goType(typ) + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +func (b *bytesBuilder) Annotations(annotations ...schema.Annotation) *bytesBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for bytes. +// +// field.Bytes("blob"). +// SchemaType(map[string]string{ +// dialect.MySQL: "tinyblob", +// dialect.SQLite: "tinyblob", +// }) +func (b *bytesBuilder) SchemaType(types map[string]string) *bytesBuilder { + b.desc.SchemaType = types + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *bytesBuilder) Descriptor() *Descriptor { + if b.desc.Default != nil { + b.desc.checkDefaultFunc(bytesType) + } + b.desc.checkGoType(bytesType) + return b.desc +} + +// jsonBuilder is the builder for json fields. +type jsonBuilder struct { + desc *Descriptor +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *jsonBuilder) StorageKey(key string) *jsonBuilder { + b.desc.StorageKey = key + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *jsonBuilder) Optional() *jsonBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *jsonBuilder) Immutable() *jsonBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *jsonBuilder) Comment(c string) *jsonBuilder { + b.desc.Comment = c + return b +} + +// Sensitive fields not printable and not serializable. +func (b *jsonBuilder) Sensitive() *jsonBuilder { + b.desc.Sensitive = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *jsonBuilder) StructTag(s string) *jsonBuilder { + b.desc.Tag = s + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for json. +// +// field.JSON("json"). +// SchemaType(map[string]string{ +// dialect.MySQL: "json", +// dialect.Postgres: "jsonb", +// }) +func (b *jsonBuilder) SchemaType(types map[string]string) *jsonBuilder { + b.desc.SchemaType = types + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +func (b *jsonBuilder) Annotations(annotations ...schema.Annotation) *jsonBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Default sets the default value of the field. For example: +// +// field.JSON("dirs", []http.Dir{}). +// // A static default value. +// Default([]http.Dir{"/tmp"}) +// +// field.JSON("dirs", []http.Dir{}). +// // A function for generating the default value. 
+// Default(DefaultDirs) +func (b *jsonBuilder) Default(v any) *jsonBuilder { + b.desc.Default = v + switch fieldT, defaultT := b.desc.Info.RType.rtype, reflect.TypeOf(v); { + case fieldT == defaultT: + case defaultT.Kind() == reflect.Func: + b.desc.checkDefaultFunc(b.desc.Info.RType.rtype) + default: + b.desc.Err = fmt.Errorf("expect type (func() %[1]s) or (%[1]s) for other default value", b.desc.Info) + } + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *jsonBuilder) Descriptor() *Descriptor { + return b.desc +} + +// enumBuilder is the builder for enum fields. +type enumBuilder struct { + desc *Descriptor +} + +// Values adds given values to the enum values. +// +// field.Enum("priority"). +// Values("low", "mid", "high") +func (b *enumBuilder) Values(values ...string) *enumBuilder { + for _, v := range values { + b.desc.Enums = append(b.desc.Enums, struct{ N, V string }{N: v, V: v}) + } + return b +} + +// NamedValues adds the given name, value pairs to the enum value. +// The "name" defines the Go identifier of the enum, and the value +// defines the actual value in the database. +// +// NamedValues returns an error if given an odd number of arguments. +// +// field.Enum("priority"). +// NamedValues( +// "Low", "LOW", +// "Mid", "MID", +// "High", "HIGH", +// ) +func (b *enumBuilder) NamedValues(namevalue ...string) *enumBuilder { + if len(namevalue)%2 == 1 { + b.desc.Err = fmt.Errorf("Enum.NamedValues: odd argument count") + return b + } + for i := 0; i < len(namevalue); i += 2 { + b.desc.Enums = append(b.desc.Enums, struct{ N, V string }{N: namevalue[i], V: namevalue[i+1]}) + } + return b +} + +// Default sets the default value of the field. +func (b *enumBuilder) Default(value string) *enumBuilder { + b.desc.Default = value + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *enumBuilder) StorageKey(key string) *enumBuilder { + b.desc.StorageKey = key + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *enumBuilder) Optional() *enumBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *enumBuilder) Immutable() *enumBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *enumBuilder) Comment(c string) *enumBuilder { + b.desc.Comment = c + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *enumBuilder) Nillable() *enumBuilder { + b.desc.Nillable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *enumBuilder) StructTag(s string) *enumBuilder { + b.desc.Tag = s + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for enum. +// +// field.Enum("enum"). +// SchemaType(map[string]string{ +// dialect.Postgres: "EnumType", +// }) +func (b *enumBuilder) SchemaType(types map[string]string) *enumBuilder { + b.desc.SchemaType = types + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Enum("enum"). 
+// Annotations( +// entgql.OrderField("ENUM"), +// ) +func (b *enumBuilder) Annotations(annotations ...schema.Annotation) *enumBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// EnumValues defines the interface for getting the enum values. +type EnumValues interface { + Values() []string +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Enum("enum"). +// GoType(role.Enum("role")) +func (b *enumBuilder) GoType(ev EnumValues) *enumBuilder { + b.Values(ev.Values()...) + b.desc.goType(ev) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *enumBuilder) Descriptor() *Descriptor { + if b.desc.Info.RType != nil { + // If an error already exists, let that be returned instead. + // Otherwise, check that the underlying type is either a string or implements Stringer. + if b.desc.Err == nil && b.desc.Info.RType.rtype.Kind() != reflect.String && !b.desc.Info.Stringer() { + b.desc.Err = errors.New("enum values which implement ValueScanner must also implement Stringer") + } + b.desc.checkGoType(stringType) + } + return b.desc +} + +// uuidBuilder is the builder for uuid fields. +type uuidBuilder struct { + desc *Descriptor +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *uuidBuilder) StorageKey(key string) *uuidBuilder { + b.desc.StorageKey = key + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uuidBuilder) Nillable() *uuidBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uuidBuilder) Optional() *uuidBuilder { + b.desc.Optional = true + return b +} + +// Unique makes the field unique within all vertices of this type. +func (b *uuidBuilder) Unique() *uuidBuilder { + b.desc.Unique = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uuidBuilder) Immutable() *uuidBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *uuidBuilder) Comment(c string) *uuidBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *uuidBuilder) StructTag(s string) *uuidBuilder { + b.desc.Tag = s + return b +} + +// Default sets the function that is applied to set default value +// of the field on creation. Codegen fails if the default function +// doesn't return the same concrete that was set for the UUID type. +// +// field.UUID("id", uuid.UUID{}). +// Default(uuid.New) +func (b *uuidBuilder) Default(fn any) *uuidBuilder { + typ := reflect.TypeOf(fn) + if typ.Kind() != reflect.Func || typ.NumIn() != 0 || typ.NumOut() != 1 || typ.Out(0).String() != b.desc.Info.String() { + b.desc.Err = fmt.Errorf("expect type (func() %s) for uuid default value", b.desc.Info) + } + b.desc.Default = fn + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uuid. +// +// field.UUID("id", uuid.New()). 
+// SchemaType(map[string]string{ +// dialect.Postgres: "CustomUUID", +// }) +func (b *uuidBuilder) SchemaType(types map[string]string) *uuidBuilder { + b.desc.SchemaType = types + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.UUID("id", uuid.New()). +// Annotations( +// entgql.OrderField("ID"), +// ) +func (b *uuidBuilder) Annotations(annotations ...schema.Annotation) *uuidBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *uuidBuilder) Descriptor() *Descriptor { + b.desc.checkGoType(valueScannerType) + return b.desc +} + +// otherBuilder is the builder for other fields. +type otherBuilder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *otherBuilder) Unique() *otherBuilder { + b.desc.Unique = true + return b +} + +// Sensitive fields not printable and not serializable. +func (b *otherBuilder) Sensitive() *otherBuilder { + b.desc.Sensitive = true + return b +} + +// Default sets the default value of the field. For example: +// +// field.Other("link", &Link{}). +// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }). +// // A static default value. +// Default(&Link{Addr: "0.0.0.0"}) +// +// field.Other("link", &Link{}). +// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }). +// // A function for generating the default value. +// Default(NewLink) +func (b *otherBuilder) Default(v any) *otherBuilder { + b.desc.Default = v + switch fieldT, defaultT := b.desc.Info.RType.rtype, reflect.TypeOf(v); { + case fieldT == defaultT: + case defaultT.Kind() == reflect.Func: + b.desc.checkDefaultFunc(b.desc.Info.RType.rtype) + default: + b.desc.Err = fmt.Errorf("expect type (func() %[1]s) or (%[1]s) for other default value", b.desc.Info) + } + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated field. +func (b *otherBuilder) Nillable() *otherBuilder { + b.desc.Nillable = true + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *otherBuilder) Optional() *otherBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *otherBuilder) Immutable() *otherBuilder { + b.desc.Immutable = true + return b +} + +// Comment sets the comment of the field. +func (b *otherBuilder) Comment(c string) *otherBuilder { + b.desc.Comment = c + return b +} + +// StructTag sets the struct tag of the field. +func (b *otherBuilder) StructTag(s string) *otherBuilder { + b.desc.Tag = s + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *otherBuilder) StorageKey(key string) *otherBuilder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for string. +// +// field.Other("link", Link{}). 
+// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }) +func (b *otherBuilder) SchemaType(types map[string]string) *otherBuilder { + b.desc.SchemaType = types + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Other("link", &Link{}). +// SchemaType(map[string]string{ +// dialect.MySQL: "text", +// dialect.Postgres: "varchar", +// }). +// Annotations( +// entgql.OrderField("LINK"), +// ) +func (b *otherBuilder) Annotations(annotations ...schema.Annotation) *otherBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *otherBuilder) Descriptor() *Descriptor { + b.desc.checkGoType(valueScannerType) + if len(b.desc.SchemaType) == 0 { + b.desc.Err = fmt.Errorf("expect SchemaType to be set for other field") + } + return b.desc +} + +// A Descriptor for field configuration. +type Descriptor struct { + Tag string // struct tag. + Size int // varchar size. + Name string // field name. + Info *TypeInfo // field type info. + ValueScanner any // custom field codec. + Unique bool // unique index of field. + Nillable bool // nillable struct field. + Optional bool // nullable field in database. + Immutable bool // create only field. + Default any // default value on create. + UpdateDefault any // default value on update. + Validators []any // validator functions. + StorageKey string // sql column or gremlin property. + Enums []struct{ N, V string } // enum values. + Sensitive bool // sensitive info string field. + SchemaType map[string]string // override the schema type. + Annotations []schema.Annotation // field annotations. + Comment string // field comment. + Err error +} + +func (d *Descriptor) goType(typ any) { + t := reflect.TypeOf(typ) + tv := indirect(t) + info := &TypeInfo{ + Type: d.Info.Type, + Ident: t.String(), + PkgPath: tv.PkgPath(), + PkgName: pkgName(tv.String()), + RType: &RType{ + rtype: t, + Kind: t.Kind(), + Name: tv.Name(), + Ident: tv.String(), + PkgPath: tv.PkgPath(), + Methods: make(map[string]struct{ In, Out []*RType }, t.NumMethod()), + }, + } + methods(t, info.RType) + switch t.Kind() { + case reflect.Slice, reflect.Ptr, reflect.Map: + info.Nillable = true + } + d.Info = info +} + +func (d *Descriptor) checkGoType(expectType reflect.Type) { + t := expectType + if d.Info.RType != nil && d.Info.RType.rtype != nil { + t = d.Info.RType.rtype + } + switch pt := reflect.PtrTo(t); { + // An external ValueScanner. 
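+ // The ValueScanner contract (Value, ScanValue, FromValue) is verified
+ // below with reflection against the field's Go type.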
+ case d.ValueScanner != nil:
+ vs := reflect.Indirect(reflect.ValueOf(d.ValueScanner)).Type()
+ m1, ok1 := vs.MethodByName("Value")
+ m2, ok2 := vs.MethodByName("ScanValue")
+ m3, ok3 := vs.MethodByName("FromValue")
+ switch {
+ case !ok1, m1.Type.NumIn() != 2, m1.Type.In(1) != t,
+ m1.Type.NumOut() != 2, m1.Type.Out(0) != valueType, m1.Type.Out(1) != errorType:
+ d.Err = fmt.Errorf("ValueScanner must implement the Value method: func Value(%s) (driver.Value, error)", t)
+ case !ok2, m2.Type.NumIn() != 1, m2.Type.NumOut() != 1, m2.Type.Out(0) != valueScannerType:
+ d.Err = errors.New("ValueScanner must implement the ScanValue method: func ScanValue() field.ValueScanner")
+ case !ok3, m3.Type.NumIn() != 2, m3.Type.In(1) != valueType, m3.Type.NumOut() != 2, m3.Type.Out(0) != t, m3.Type.Out(1) != errorType:
+ d.Err = fmt.Errorf("ValueScanner must implement the FromValue method: func FromValue(driver.Value) (%s, error)", t)
+ }
+ // No GoType was provided.
+ case d.Info.RType == nil:
+ // A GoType without an external ValueScanner.
+ case pt.Implements(valueScannerType), t.Implements(valueScannerType), t.Kind() == expectType.Kind() && t.ConvertibleTo(expectType):
+ // There is a GoType, but it's not a ValueScanner.
+ default:
+ d.Err = fmt.Errorf("GoType must be a %q type, ValueScanner or provide an external ValueScanner", expectType)
+ }
+}
+
+// pkgName returns the package name from a Go
+// identifier with a package qualifier.
+func pkgName(ident string) string {
+ i := strings.LastIndexByte(ident, '.')
+ if i == -1 {
+ return ""
+ }
+ s := ident[:i]
+ // Trim slice, array, and pointer qualifiers, e.g. "[]*url.URL" -> "url".
+ if i := strings.LastIndexAny(s, "]*"); i != -1 {
+ s = s[i+1:]
+ }
+ return s
+}
+
+func methods(t reflect.Type, rtype *RType) {
+ // For type T, add methods with a
+ // pointer receiver as well (*T).
+ if t.Kind() != reflect.Ptr {
+ t = reflect.PtrTo(t)
+ }
+ n := t.NumMethod()
+ for i := 0; i < n; i++ {
+ m := t.Method(i)
+ in := make([]*RType, m.Type.NumIn()-1)
+ for j := range in {
+ arg := m.Type.In(j + 1)
+ in[j] = &RType{Name: arg.Name(), Ident: arg.String(), Kind: arg.Kind(), PkgPath: arg.PkgPath()}
+ }
+ out := make([]*RType, m.Type.NumOut())
+ for j := range out {
+ ret := m.Type.Out(j)
+ out[j] = &RType{Name: ret.Name(), Ident: ret.String(), Kind: ret.Kind(), PkgPath: ret.PkgPath()}
+ }
+ rtype.Methods[m.Name] = struct{ In, Out []*RType }{in, out}
+ }
+}
+
+func (d *Descriptor) checkDefaultFunc(expectType reflect.Type) {
+ for _, typ := range []reflect.Type{reflect.TypeOf(d.Default), reflect.TypeOf(d.UpdateDefault)} {
+ if typ == nil || typ.Kind() != reflect.Func || d.Err != nil {
+ continue
+ }
+ err := fmt.Errorf("expect type (func() %s) for default value", d.Info)
+ if typ.NumIn() != 0 || typ.NumOut() != 1 {
+ d.Err = err
+ // Skip the return-type check below; typ.Out(0) would panic
+ // for functions without a return value.
+ continue
+ }
+ rtype := expectType
+ if d.Info.RType != nil {
+ rtype = d.Info.RType.rtype
+ }
+ if !typ.Out(0).AssignableTo(rtype) {
+ d.Err = err
+ }
+ }
+}
+
+var (
+ boolType = reflect.TypeOf(false)
+ bytesType = reflect.TypeOf([]byte(nil))
+ timeType = reflect.TypeOf(time.Time{})
+ stringType = reflect.TypeOf("")
+ valueType = reflect.TypeOf((*driver.Value)(nil)).Elem()
+ valuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ valueScannerType = reflect.TypeOf((*ValueScanner)(nil)).Elem()
+ validatorType = reflect.TypeOf((*Validator)(nil)).Elem()
+)
+
+// ValueScanner is the interface that groups the Value
+// and the Scan methods implemented by custom Go types.
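+//
+// A minimal sketch of a custom GoType satisfying this interface; the URL
+// wrapper type below is illustrative and not part of this package:
+//
+// type URL struct{ *url.URL }
+//
+// // Value implements driver.Valuer by encoding the URL as a string.
+// func (u URL) Value() (driver.Value, error) {
+// return u.String(), nil
+// }
+//
+// // Scan implements sql.Scanner by parsing the stored string.
+// func (u *URL) Scan(v any) error {
+// s, ok := v.(string)
+// if !ok {
+// return fmt.Errorf("unexpected type %T", v)
+// }
+// parsed, err := url.Parse(s)
+// if err != nil {
+// return err
+// }
+// u.URL = parsed
+// return nil
+// }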
+type ValueScanner interface {
+ driver.Valuer
+ sql.Scanner
+}
+
+// TypeValueScanner is the interface that groups all methods for
+// attaching an external ValueScanner to a custom GoType.
+type TypeValueScanner[T any] interface {
+ // Value returns the driver.Value of the GoType.
+ Value(T) (driver.Value, error)
+ // ScanValue returns a new ValueScanner that functions as an
+ // intermediate result between database value and GoType value.
+ // For example, sql.NullString or sql.NullInt64.
+ ScanValue() ValueScanner
+ // FromValue returns the field instance from the ScanValue
+ // above after the database value was scanned.
+ FromValue(driver.Value) (T, error)
+}
+
+// TextValueScanner is a TypeValueScanner that calls MarshalText
+// for storing values in the database, and calls UnmarshalText for scanning
+// database values into struct fields.
+type TextValueScanner[T interface {
+ encoding.TextMarshaler
+ encoding.TextUnmarshaler
+}] struct{}
+
+// Value implements the TypeValueScanner.Value method.
+func (TextValueScanner[T]) Value(v T) (driver.Value, error) {
+ return v.MarshalText()
+}
+
+// ScanValue implements the TypeValueScanner.ScanValue method.
+func (TextValueScanner[T]) ScanValue() ValueScanner {
+ return &sql.NullString{}
+}
+
+// FromValue implements the TypeValueScanner.FromValue method.
+func (TextValueScanner[T]) FromValue(v driver.Value) (tv T, err error) {
+ s, ok := v.(*sql.NullString)
+ if !ok {
+ return tv, fmt.Errorf("unexpected input for FromValue: %T", v)
+ }
+ tv = newT(tv).(T)
+ if s.Valid {
+ err = tv.UnmarshalText([]byte(s.String))
+ }
+ return tv, err
+}
+
+// BinaryValueScanner is a TypeValueScanner that calls MarshalBinary
+// for storing values in the database, and calls UnmarshalBinary for scanning
+// database values into struct fields.
+type BinaryValueScanner[T interface {
+ encoding.BinaryMarshaler
+ encoding.BinaryUnmarshaler
+}] struct{}
+
+// Value implements the TypeValueScanner.Value method.
+func (BinaryValueScanner[T]) Value(v T) (driver.Value, error) {
+ return v.MarshalBinary()
+}
+
+// ScanValue implements the TypeValueScanner.ScanValue method.
+func (BinaryValueScanner[T]) ScanValue() ValueScanner {
+ return &sql.NullString{}
+}
+
+// FromValue implements the TypeValueScanner.FromValue method.
+func (BinaryValueScanner[T]) FromValue(v driver.Value) (tv T, err error) {
+ s, ok := v.(*sql.NullString)
+ if !ok {
+ return tv, fmt.Errorf("unexpected input for FromValue: %T", v)
+ }
+ tv = newT(tv).(T)
+ if s.Valid {
+ err = tv.UnmarshalBinary([]byte(s.String))
+ }
+ return tv, err
+}
+
+// ValueScannerFunc wraps a pair of functions to implement the
+// TypeValueScanner interface.
+type ValueScannerFunc[T any, S ValueScanner] struct {
+ V func(T) (driver.Value, error)
+ S func(S) (T, error)
+}
+
+// Value implements the TypeValueScanner.Value method.
+func (f ValueScannerFunc[T, S]) Value(t T) (driver.Value, error) {
+ return f.V(t)
+}
+
+// ScanValue implements the TypeValueScanner.ScanValue method.
+func (f ValueScannerFunc[T, S]) ScanValue() ValueScanner {
+ var s S
+ return newT(s).(S)
+}
+
+// FromValue implements the TypeValueScanner.FromValue method.
+func (f ValueScannerFunc[T, S]) FromValue(v driver.Value) (tv T, err error) {
+ s, ok := v.(S)
+ if !ok {
+ return tv, fmt.Errorf("unexpected input for FromValue: %T", v)
+ }
+ return f.S(s)
+}
+
+// newT ensures the type is initialized.
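+// For example, a nil *sql.NullString input yields a freshly allocated
+// (non-nil) *sql.NullString, while non-pointer values are returned as-is.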
+func newT(t any) any { + if rt := reflect.TypeOf(t); rt.Kind() == reflect.Ptr { + return reflect.New(rt.Elem()).Interface() + } + return t +} + +// Validator interface wraps the Validate method. Custom GoTypes with +// this method will be validated when the entity is created or updated. +type Validator interface { + Validate() error +} + +// indirect returns the type at the end of indirection. +func indirect(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func pkgPath(t reflect.Type) string { + pkg := t.PkgPath() + if pkg != "" { + return pkg + } + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + return pkgPath(t.Elem()) + } + return pkg +} diff --git a/vendor/entgo.io/ent/schema/field/numeric.go b/vendor/entgo.io/ent/schema/field/numeric.go new file mode 100644 index 00000000..df322c98 --- /dev/null +++ b/vendor/entgo.io/ent/schema/field/numeric.go @@ -0,0 +1,2274 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +// Code generated by internal/numeric.tmpl, DO NOT EDIT. + +package field + +import ( + "errors" + "reflect" + + "entgo.io/ent/schema" +) + +//go:generate go run internal/gen.go + +// Int returns a new Field with type int. +func Int(name string) *intBuilder { + return &intBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeInt}, + }} +} + +// Uint returns a new Field with type uint. +func Uint(name string) *uintBuilder { + return &uintBuilder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeUint}, + }} +} + +// Int8 returns a new Field with type int8. +func Int8(name string) *int8Builder { + return &int8Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeInt8}, + }} +} + +// Int16 returns a new Field with type int16. +func Int16(name string) *int16Builder { + return &int16Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeInt16}, + }} +} + +// Int32 returns a new Field with type int32. +func Int32(name string) *int32Builder { + return &int32Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeInt32}, + }} +} + +// Int64 returns a new Field with type int64. +func Int64(name string) *int64Builder { + return &int64Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeInt64}, + }} +} + +// Uint8 returns a new Field with type uint8. +func Uint8(name string) *uint8Builder { + return &uint8Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeUint8}, + }} +} + +// Uint16 returns a new Field with type uint16. +func Uint16(name string) *uint16Builder { + return &uint16Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeUint16}, + }} +} + +// Uint32 returns a new Field with type uint32. +func Uint32(name string) *uint32Builder { + return &uint32Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeUint32}, + }} +} + +// Uint64 returns a new Field with type uint64. +func Uint64(name string) *uint64Builder { + return &uint64Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeUint64}, + }} +} + +// Float returns a new Field with type float64. +func Float(name string) *float64Builder { + return &float64Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeFloat64}, + }} +} + +// Float32 returns a new Field with type float32. 
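+//
+// A hypothetical usage (the field name is illustrative):
+//
+// field.Float32("ratio").
+// Range(0, 1)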
+func Float32(name string) *float32Builder { + return &float32Builder{&Descriptor{ + Name: name, + Info: &TypeInfo{Type: TypeFloat32}, + }} +} + +// intBuilder is the builder for int field. +type intBuilder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *intBuilder) Unique() *intBuilder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *intBuilder) Range(i, j int) *intBuilder { + b.desc.Validators = append(b.desc.Validators, func(v int) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *intBuilder) Min(i int) *intBuilder { + b.desc.Validators = append(b.desc.Validators, func(v int) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *intBuilder) Max(i int) *intBuilder { + b.desc.Validators = append(b.desc.Validators, func(v int) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *intBuilder) Positive() *intBuilder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *intBuilder) Negative() *intBuilder { + return b.Max(-1) +} + +// NonNegative adds a minimum value validator with the value of 0. Operation fails if the validator fails. +func (b *intBuilder) NonNegative() *intBuilder { + return b.Min(0) +} + +// Default sets the default value of the field. +func (b *intBuilder) Default(i int) *intBuilder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *intBuilder) DefaultFunc(fn any) *intBuilder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Int("int"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *intBuilder) UpdateDefault(fn any) *intBuilder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *intBuilder) Nillable() *intBuilder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *intBuilder) Comment(c string) *intBuilder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *intBuilder) Optional() *intBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *intBuilder) Immutable() *intBuilder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *intBuilder) StructTag(s string) *intBuilder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. 
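+//
+// For example, to accept only multiples of ten (the field name is
+// illustrative):
+//
+// field.Int("percent").
+// Validate(func(v int) error {
+// if v%10 != 0 {
+// return errors.New("percent must be a multiple of 10")
+// }
+// return nil
+// })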
+func (b *intBuilder) Validate(fn func(int) error) *intBuilder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *intBuilder) StorageKey(key string) *intBuilder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for int. +// +// field.Int("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *intBuilder) SchemaType(types map[string]string) *intBuilder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Int("int"). +// GoType(pkg.Int(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *intBuilder) GoType(typ any) *intBuilder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *intBuilder) ValueScanner(vs any) *intBuilder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Int("int"). +// Annotations(entgql.OrderField("INT")) +func (b *intBuilder) Annotations(annotations ...schema.Annotation) *intBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *intBuilder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(intType) + } + b.desc.checkGoType(intType) + return b.desc +} + +// uintBuilder is the builder for uint field. +type uintBuilder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *uintBuilder) Unique() *uintBuilder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *uintBuilder) Range(i, j uint) *uintBuilder { + b.desc.Validators = append(b.desc.Validators, func(v uint) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *uintBuilder) Min(i uint) *uintBuilder { + b.desc.Validators = append(b.desc.Validators, func(v uint) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *uintBuilder) Max(i uint) *uintBuilder { + b.desc.Validators = append(b.desc.Validators, func(v uint) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. 
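+// Unsigned builders provide no Negative or NonNegative helpers, since
+// unsigned values are non-negative by construction.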
+func (b *uintBuilder) Positive() *uintBuilder { + return b.Min(1) +} + +// Default sets the default value of the field. +func (b *uintBuilder) Default(i uint) *uintBuilder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *uintBuilder) DefaultFunc(fn any) *uintBuilder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Uint("uint"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *uintBuilder) UpdateDefault(fn any) *uintBuilder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uintBuilder) Nillable() *uintBuilder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *uintBuilder) Comment(c string) *uintBuilder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uintBuilder) Optional() *uintBuilder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uintBuilder) Immutable() *uintBuilder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *uintBuilder) StructTag(s string) *uintBuilder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *uintBuilder) Validate(fn func(uint) error) *uintBuilder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *uintBuilder) StorageKey(key string) *uintBuilder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uint. +// +// field.Uint("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *uintBuilder) SchemaType(types map[string]string) *uintBuilder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Uint("uint"). +// GoType(pkg.Uint(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *uintBuilder) GoType(typ any) *uintBuilder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *uintBuilder) ValueScanner(vs any) *uintBuilder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Uint("uint"). +// Annotations(entgql.OrderField("UINT")) +func (b *uintBuilder) Annotations(annotations ...schema.Annotation) *uintBuilder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) 
+ return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *uintBuilder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(uintType) + } + b.desc.checkGoType(uintType) + return b.desc +} + +// int8Builder is the builder for int8 field. +type int8Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *int8Builder) Unique() *int8Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *int8Builder) Range(i, j int8) *int8Builder { + b.desc.Validators = append(b.desc.Validators, func(v int8) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *int8Builder) Min(i int8) *int8Builder { + b.desc.Validators = append(b.desc.Validators, func(v int8) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *int8Builder) Max(i int8) *int8Builder { + b.desc.Validators = append(b.desc.Validators, func(v int8) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *int8Builder) Positive() *int8Builder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *int8Builder) Negative() *int8Builder { + return b.Max(-1) +} + +// NonNegative adds a minimum value validator with the value of 0. Operation fails if the validator fails. +func (b *int8Builder) NonNegative() *int8Builder { + return b.Min(0) +} + +// Default sets the default value of the field. +func (b *int8Builder) Default(i int8) *int8Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *int8Builder) DefaultFunc(fn any) *int8Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Int8("int8"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *int8Builder) UpdateDefault(fn any) *int8Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *int8Builder) Nillable() *int8Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *int8Builder) Comment(c string) *int8Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *int8Builder) Optional() *int8Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *int8Builder) Immutable() *int8Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. 
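+//
+// For example (the tag value is illustrative):
+//
+// field.Int8("priority").
+// StructTag(`json:"priority,omitempty"`)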
+func (b *int8Builder) StructTag(s string) *int8Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *int8Builder) Validate(fn func(int8) error) *int8Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *int8Builder) StorageKey(key string) *int8Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for int8. +// +// field.Int8("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *int8Builder) SchemaType(types map[string]string) *int8Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Int8("int8"). +// GoType(pkg.Int8(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *int8Builder) GoType(typ any) *int8Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *int8Builder) ValueScanner(vs any) *int8Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Int8("int8"). +// Annotations(entgql.OrderField("INT8")) +func (b *int8Builder) Annotations(annotations ...schema.Annotation) *int8Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *int8Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(int8Type) + } + b.desc.checkGoType(int8Type) + return b.desc +} + +// int16Builder is the builder for int16 field. +type int16Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *int16Builder) Unique() *int16Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *int16Builder) Range(i, j int16) *int16Builder { + b.desc.Validators = append(b.desc.Validators, func(v int16) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *int16Builder) Min(i int16) *int16Builder { + b.desc.Validators = append(b.desc.Validators, func(v int16) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. 
+func (b *int16Builder) Max(i int16) *int16Builder { + b.desc.Validators = append(b.desc.Validators, func(v int16) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *int16Builder) Positive() *int16Builder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *int16Builder) Negative() *int16Builder { + return b.Max(-1) +} + +// NonNegative adds a minimum value validator with the value of 0. Operation fails if the validator fails. +func (b *int16Builder) NonNegative() *int16Builder { + return b.Min(0) +} + +// Default sets the default value of the field. +func (b *int16Builder) Default(i int16) *int16Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *int16Builder) DefaultFunc(fn any) *int16Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Int16("int16"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *int16Builder) UpdateDefault(fn any) *int16Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *int16Builder) Nillable() *int16Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *int16Builder) Comment(c string) *int16Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *int16Builder) Optional() *int16Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *int16Builder) Immutable() *int16Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *int16Builder) StructTag(s string) *int16Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *int16Builder) Validate(fn func(int16) error) *int16Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *int16Builder) StorageKey(key string) *int16Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for int16. +// +// field.Int16("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *int16Builder) SchemaType(types map[string]string) *int16Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Int16("int16"). +// GoType(pkg.Int16(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. 
For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *int16Builder) GoType(typ any) *int16Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *int16Builder) ValueScanner(vs any) *int16Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Int16("int16"). +// Annotations(entgql.OrderField("INT16")) +func (b *int16Builder) Annotations(annotations ...schema.Annotation) *int16Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *int16Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(int16Type) + } + b.desc.checkGoType(int16Type) + return b.desc +} + +// int32Builder is the builder for int32 field. +type int32Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *int32Builder) Unique() *int32Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *int32Builder) Range(i, j int32) *int32Builder { + b.desc.Validators = append(b.desc.Validators, func(v int32) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *int32Builder) Min(i int32) *int32Builder { + b.desc.Validators = append(b.desc.Validators, func(v int32) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *int32Builder) Max(i int32) *int32Builder { + b.desc.Validators = append(b.desc.Validators, func(v int32) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *int32Builder) Positive() *int32Builder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *int32Builder) Negative() *int32Builder { + return b.Max(-1) +} + +// NonNegative adds a minimum value validator with the value of 0. Operation fails if the validator fails. +func (b *int32Builder) NonNegative() *int32Builder { + return b.Min(0) +} + +// Default sets the default value of the field. +func (b *int32Builder) Default(i int32) *int32Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *int32Builder) DefaultFunc(fn any) *int32Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Int32("int32"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *int32Builder) UpdateDefault(fn any) *int32Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. 
+// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *int32Builder) Nillable() *int32Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *int32Builder) Comment(c string) *int32Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *int32Builder) Optional() *int32Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *int32Builder) Immutable() *int32Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *int32Builder) StructTag(s string) *int32Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *int32Builder) Validate(fn func(int32) error) *int32Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *int32Builder) StorageKey(key string) *int32Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for int32. +// +// field.Int32("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *int32Builder) SchemaType(types map[string]string) *int32Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Int32("int32"). +// GoType(pkg.Int32(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *int32Builder) GoType(typ any) *int32Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *int32Builder) ValueScanner(vs any) *int32Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Int32("int32"). +// Annotations(entgql.OrderField("INT32")) +func (b *int32Builder) Annotations(annotations ...schema.Annotation) *int32Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *int32Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(int32Type) + } + b.desc.checkGoType(int32Type) + return b.desc +} + +// int64Builder is the builder for int64 field. +type int64Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *int64Builder) Unique() *int64Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. 
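+//
+// For example (the field name is illustrative):
+//
+// field.Int64("port").
+// Range(1, 65535)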
+func (b *int64Builder) Range(i, j int64) *int64Builder { + b.desc.Validators = append(b.desc.Validators, func(v int64) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *int64Builder) Min(i int64) *int64Builder { + b.desc.Validators = append(b.desc.Validators, func(v int64) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *int64Builder) Max(i int64) *int64Builder { + b.desc.Validators = append(b.desc.Validators, func(v int64) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *int64Builder) Positive() *int64Builder { + return b.Min(1) +} + +// Negative adds a maximum value validator with the value of -1. Operation fails if the validator fails. +func (b *int64Builder) Negative() *int64Builder { + return b.Max(-1) +} + +// NonNegative adds a minimum value validator with the value of 0. Operation fails if the validator fails. +func (b *int64Builder) NonNegative() *int64Builder { + return b.Min(0) +} + +// Default sets the default value of the field. +func (b *int64Builder) Default(i int64) *int64Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *int64Builder) DefaultFunc(fn any) *int64Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Int64("int64"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *int64Builder) UpdateDefault(fn any) *int64Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *int64Builder) Nillable() *int64Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *int64Builder) Comment(c string) *int64Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *int64Builder) Optional() *int64Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *int64Builder) Immutable() *int64Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *int64Builder) StructTag(s string) *int64Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *int64Builder) Validate(fn func(int64) error) *int64Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *int64Builder) StorageKey(key string) *int64Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for int64. +// +// field.Int64("oid"). 
+// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *int64Builder) SchemaType(types map[string]string) *int64Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Int64("int64"). +// GoType(pkg.Int64(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *int64Builder) GoType(typ any) *int64Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *int64Builder) ValueScanner(vs any) *int64Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Int64("int64"). +// Annotations(entgql.OrderField("INT64")) +func (b *int64Builder) Annotations(annotations ...schema.Annotation) *int64Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *int64Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(int64Type) + } + b.desc.checkGoType(int64Type) + return b.desc +} + +// uint8Builder is the builder for uint8 field. +type uint8Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *uint8Builder) Unique() *uint8Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *uint8Builder) Range(i, j uint8) *uint8Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint8) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *uint8Builder) Min(i uint8) *uint8Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint8) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *uint8Builder) Max(i uint8) *uint8Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint8) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *uint8Builder) Positive() *uint8Builder { + return b.Min(1) +} + +// Default sets the default value of the field. +func (b *uint8Builder) Default(i uint8) *uint8Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *uint8Builder) DefaultFunc(fn any) *uint8Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. 
For example: +// +// field.Uint8("uint8"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *uint8Builder) UpdateDefault(fn any) *uint8Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uint8Builder) Nillable() *uint8Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *uint8Builder) Comment(c string) *uint8Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uint8Builder) Optional() *uint8Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uint8Builder) Immutable() *uint8Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *uint8Builder) StructTag(s string) *uint8Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *uint8Builder) Validate(fn func(uint8) error) *uint8Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *uint8Builder) StorageKey(key string) *uint8Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uint8. +// +// field.Uint8("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *uint8Builder) SchemaType(types map[string]string) *uint8Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Uint8("uint8"). +// GoType(pkg.Uint8(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *uint8Builder) GoType(typ any) *uint8Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *uint8Builder) ValueScanner(vs any) *uint8Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Uint8("uint8"). +// Annotations(entgql.OrderField("UINT8")) +func (b *uint8Builder) Annotations(annotations ...schema.Annotation) *uint8Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *uint8Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(uint8Type) + } + b.desc.checkGoType(uint8Type) + return b.desc +} + +// uint16Builder is the builder for uint16 field. +type uint16Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. 
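+// In SQL dialects, uniqueness is enforced by a unique index on the
+// field's column.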
+func (b *uint16Builder) Unique() *uint16Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *uint16Builder) Range(i, j uint16) *uint16Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint16) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *uint16Builder) Min(i uint16) *uint16Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint16) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *uint16Builder) Max(i uint16) *uint16Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint16) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *uint16Builder) Positive() *uint16Builder { + return b.Min(1) +} + +// Default sets the default value of the field. +func (b *uint16Builder) Default(i uint16) *uint16Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *uint16Builder) DefaultFunc(fn any) *uint16Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Uint16("uint16"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *uint16Builder) UpdateDefault(fn any) *uint16Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uint16Builder) Nillable() *uint16Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *uint16Builder) Comment(c string) *uint16Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uint16Builder) Optional() *uint16Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uint16Builder) Immutable() *uint16Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *uint16Builder) StructTag(s string) *uint16Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *uint16Builder) Validate(fn func(uint16) error) *uint16Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *uint16Builder) StorageKey(key string) *uint16Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uint16. +// +// field.Uint16("oid"). 
+// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *uint16Builder) SchemaType(types map[string]string) *uint16Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Uint16("uint16"). +// GoType(pkg.Uint16(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *uint16Builder) GoType(typ any) *uint16Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *uint16Builder) ValueScanner(vs any) *uint16Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Uint16("uint16"). +// Annotations(entgql.OrderField("UINT16")) +func (b *uint16Builder) Annotations(annotations ...schema.Annotation) *uint16Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *uint16Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(uint16Type) + } + b.desc.checkGoType(uint16Type) + return b.desc +} + +// uint32Builder is the builder for uint32 field. +type uint32Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *uint32Builder) Unique() *uint32Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *uint32Builder) Range(i, j uint32) *uint32Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint32) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *uint32Builder) Min(i uint32) *uint32Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint32) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *uint32Builder) Max(i uint32) *uint32Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint32) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *uint32Builder) Positive() *uint32Builder { + return b.Min(1) +} + +// Default sets the default value of the field. +func (b *uint32Builder) Default(i uint32) *uint32Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. 
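+//
+// For example (the function literal is illustrative):
+//
+// field.Uint32("sequence").
+// DefaultFunc(func() uint32 {
+// return 1
+// })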
+func (b *uint32Builder) DefaultFunc(fn any) *uint32Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Uint32("uint32"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *uint32Builder) UpdateDefault(fn any) *uint32Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uint32Builder) Nillable() *uint32Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *uint32Builder) Comment(c string) *uint32Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uint32Builder) Optional() *uint32Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uint32Builder) Immutable() *uint32Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *uint32Builder) StructTag(s string) *uint32Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *uint32Builder) Validate(fn func(uint32) error) *uint32Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *uint32Builder) StorageKey(key string) *uint32Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uint32. +// +// field.Uint32("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *uint32Builder) SchemaType(types map[string]string) *uint32Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Uint32("uint32"). +// GoType(pkg.Uint32(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *uint32Builder) GoType(typ any) *uint32Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *uint32Builder) ValueScanner(vs any) *uint32Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Uint32("uint32"). +// Annotations(entgql.OrderField("UINT32")) +func (b *uint32Builder) Annotations(annotations ...schema.Annotation) *uint32Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. 
+func (b *uint32Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(uint32Type) + } + b.desc.checkGoType(uint32Type) + return b.desc +} + +// uint64Builder is the builder for uint64 field. +type uint64Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *uint64Builder) Unique() *uint64Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *uint64Builder) Range(i, j uint64) *uint64Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint64) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *uint64Builder) Min(i uint64) *uint64Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint64) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *uint64Builder) Max(i uint64) *uint64Builder { + b.desc.Validators = append(b.desc.Validators, func(v uint64) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 1. Operation fails if the validator fails. +func (b *uint64Builder) Positive() *uint64Builder { + return b.Min(1) +} + +// Default sets the default value of the field. +func (b *uint64Builder) Default(i uint64) *uint64Builder { + b.desc.Default = i + return b +} + +// DefaultFunc sets the function that is applied to set the default value +// of the field on creation. +func (b *uint64Builder) DefaultFunc(fn any) *uint64Builder { + b.desc.Default = fn + return b +} + +// UpdateDefault sets the function that is applied to set default value +// of the field on update. For example: +// +// field.Uint64("uint64"). +// Default(0). +// UpdateDefault(GenNumber), +func (b *uint64Builder) UpdateDefault(fn any) *uint64Builder { + b.desc.UpdateDefault = fn + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *uint64Builder) Nillable() *uint64Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *uint64Builder) Comment(c string) *uint64Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *uint64Builder) Optional() *uint64Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *uint64Builder) Immutable() *uint64Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *uint64Builder) StructTag(s string) *uint64Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *uint64Builder) Validate(fn func(uint64) error) *uint64Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. 
+func (b *uint64Builder) StorageKey(key string) *uint64Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for uint64. +// +// field.Uint64("oid"). +// SchemaType(map[string]string{ +// dialect.Postgres: "CustomType", +// }) +func (b *uint64Builder) SchemaType(types map[string]string) *uint64Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Uint64("uint64"). +// GoType(pkg.Uint64(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *uint64Builder) GoType(typ any) *uint64Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *uint64Builder) ValueScanner(vs any) *uint64Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Uint64("uint64"). +// Annotations(entgql.OrderField("UINT64")) +func (b *uint64Builder) Annotations(annotations ...schema.Annotation) *uint64Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *uint64Builder) Descriptor() *Descriptor { + if b.desc.Default != nil || b.desc.UpdateDefault != nil { + b.desc.checkDefaultFunc(uint64Type) + } + b.desc.checkGoType(uint64Type) + return b.desc +} + +var ( + intType = reflect.TypeOf(int(0)) + uintType = reflect.TypeOf(uint(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) +) + +// float64Builder is the builder for float fields. +type float64Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *float64Builder) Unique() *float64Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *float64Builder) Range(i, j float64) *float64Builder { + b.desc.Validators = append(b.desc.Validators, func(v float64) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *float64Builder) Min(i float64) *float64Builder { + b.desc.Validators = append(b.desc.Validators, func(v float64) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. 
+func (b *float64Builder) Max(i float64) *float64Builder { + b.desc.Validators = append(b.desc.Validators, func(v float64) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 0.000001. Operation fails if the validator fails. +func (b *float64Builder) Positive() *float64Builder { + return b.Min(1e-06) +} + +// Negative adds a maximum value validator with the value of -0.000001. Operation fails if the validator fails. +func (b *float64Builder) Negative() *float64Builder { + return b.Max(-1e-06) +} + +// Default sets the default value of the field. +func (b *float64Builder) Default(i float64) *float64Builder { + b.desc.Default = i + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *float64Builder) Nillable() *float64Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *float64Builder) Comment(c string) *float64Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *float64Builder) Optional() *float64Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *float64Builder) Immutable() *float64Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *float64Builder) StructTag(s string) *float64Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *float64Builder) Validate(fn func(float64) error) *float64Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. +func (b *float64Builder) StorageKey(key string) *float64Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for float64. +// +// field.Float64("amount"). +// SchemaType(map[string]string{ +// dialect.MySQL: "decimal(5, 2)", +// dialect.Postgres: "numeric(5, 2)", +// }) +func (b *float64Builder) SchemaType(types map[string]string) *float64Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Float64("float64"). +// GoType(pkg.Float64(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *float64Builder) GoType(typ any) *float64Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *float64Builder) ValueScanner(vs any) *float64Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Float64("float64"). 
+// Annotations(entgql.OrderField("FLOAT64")) +func (b *float64Builder) Annotations(annotations ...schema.Annotation) *float64Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *float64Builder) Descriptor() *Descriptor { + b.desc.checkGoType(float64Type) + return b.desc +} + +// float32Builder is the builder for float fields. +type float32Builder struct { + desc *Descriptor +} + +// Unique makes the field unique within all vertices of this type. +func (b *float32Builder) Unique() *float32Builder { + b.desc.Unique = true + return b +} + +// Range adds a range validator for this field where the given value needs to be in the range of [i, j]. +func (b *float32Builder) Range(i, j float32) *float32Builder { + b.desc.Validators = append(b.desc.Validators, func(v float32) error { + if v < i || v > j { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Min adds a minimum value validator for this field. Operation fails if the validator fails. +func (b *float32Builder) Min(i float32) *float32Builder { + b.desc.Validators = append(b.desc.Validators, func(v float32) error { + if v < i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Max adds a maximum value validator for this field. Operation fails if the validator fails. +func (b *float32Builder) Max(i float32) *float32Builder { + b.desc.Validators = append(b.desc.Validators, func(v float32) error { + if v > i { + return errors.New("value out of range") + } + return nil + }) + return b +} + +// Positive adds a minimum value validator with the value of 0.000001. Operation fails if the validator fails. +func (b *float32Builder) Positive() *float32Builder { + return b.Min(1e-06) +} + +// Negative adds a maximum value validator with the value of -0.000001. Operation fails if the validator fails. +func (b *float32Builder) Negative() *float32Builder { + return b.Max(-1e-06) +} + +// Default sets the default value of the field. +func (b *float32Builder) Default(i float32) *float32Builder { + b.desc.Default = i + return b +} + +// Nillable indicates that this field is a nillable. +// Unlike "Optional" only fields, "Nillable" fields are pointers in the generated struct. +func (b *float32Builder) Nillable() *float32Builder { + b.desc.Nillable = true + return b +} + +// Comment sets the comment of the field. +func (b *float32Builder) Comment(c string) *float32Builder { + b.desc.Comment = c + return b +} + +// Optional indicates that this field is optional on create. +// Unlike edges, fields are required by default. +func (b *float32Builder) Optional() *float32Builder { + b.desc.Optional = true + return b +} + +// Immutable indicates that this field cannot be updated. +func (b *float32Builder) Immutable() *float32Builder { + b.desc.Immutable = true + return b +} + +// StructTag sets the struct tag of the field. +func (b *float32Builder) StructTag(s string) *float32Builder { + b.desc.Tag = s + return b +} + +// Validate adds a validator for this field. Operation fails if the validation fails. +func (b *float32Builder) Validate(fn func(float32) error) *float32Builder { + b.desc.Validators = append(b.desc.Validators, fn) + return b +} + +// StorageKey sets the storage key of the field. +// In SQL dialects is the column name and Gremlin is the property. 
+func (b *float32Builder) StorageKey(key string) *float32Builder { + b.desc.StorageKey = key + return b +} + +// SchemaType overrides the default database type with a custom +// schema type (per dialect) for float32. +// +// field.Float32("amount"). +// SchemaType(map[string]string{ +// dialect.MySQL: "decimal(5, 2)", +// dialect.Postgres: "numeric(5, 2)", +// }) +func (b *float32Builder) SchemaType(types map[string]string) *float32Builder { + b.desc.SchemaType = types + return b +} + +// GoType overrides the default Go type with a custom one. +// If the provided type implements the Validator interface +// and no validators have been set, the type validator will +// be used. +// +// field.Float32("float32"). +// GoType(pkg.Float32(0)) +// +// Note that, the custom Go type `T` needs to implement the +// `Add(T) T` method in order to support the `Add` operation +// in mutations. For example: +// +// func(t1 T) Add(t2 T) T { +// return add(t1, t2) +// } +func (b *float32Builder) GoType(typ any) *float32Builder { + b.desc.goType(typ) + return b +} + +// ValueScanner provides an external value scanner for the given GoType. +// Using this option allow users to use field types that do not implement +// the sql.Scanner and driver.Valuer interfaces. +func (b *float32Builder) ValueScanner(vs any) *float32Builder { + b.desc.ValueScanner = vs + return b +} + +// Annotations adds a list of annotations to the field object to be used by +// codegen extensions. +// +// field.Float32("float32"). +// Annotations(entgql.OrderField("FLOAT32")) +func (b *float32Builder) Annotations(annotations ...schema.Annotation) *float32Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Field interface by returning its descriptor. +func (b *float32Builder) Descriptor() *Descriptor { + b.desc.checkGoType(float32Type) + return b.desc +} + +var ( + float64Type = reflect.TypeOf(float64(0)) + float32Type = reflect.TypeOf(float32(0)) +) diff --git a/vendor/entgo.io/ent/schema/field/type.go b/vendor/entgo.io/ent/schema/field/type.go new file mode 100644 index 00000000..92ab6975 --- /dev/null +++ b/vendor/entgo.io/ent/schema/field/type.go @@ -0,0 +1,249 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package field + +import ( + "fmt" + "reflect" + "strings" +) + +// A Type represents a field type. +type Type uint8 + +// List of field types. +const ( + TypeInvalid Type = iota + TypeBool + TypeTime + TypeJSON + TypeUUID + TypeBytes + TypeEnum + TypeString + TypeOther + TypeInt8 + TypeInt16 + TypeInt32 + TypeInt + TypeInt64 + TypeUint8 + TypeUint16 + TypeUint32 + TypeUint + TypeUint64 + TypeFloat32 + TypeFloat64 + endTypes +) + +// String returns the string representation of a type. +func (t Type) String() string { + if t < endTypes { + return typeNames[t] + } + return typeNames[TypeInvalid] +} + +// Numeric reports if the given type is a numeric type. +func (t Type) Numeric() bool { + return t >= TypeInt8 && t < endTypes +} + +// Float reports if the given type is a float type. +func (t Type) Float() bool { + return t == TypeFloat32 || t == TypeFloat64 +} + +// Integer reports if the given type is an integral type. +func (t Type) Integer() bool { + return t.Numeric() && !t.Float() +} + +// Valid reports if the given type if known type. 
+func (t Type) Valid() bool { + return t > TypeInvalid && t < endTypes +} + +// ConstName returns the constant name of an info type. +// It's used by entc for printing the constant name in templates. +func (t Type) ConstName() string { + switch { + case !t.Valid(): + return typeNames[TypeInvalid] + case int(t) < len(constNames) && constNames[t] != "": + return constNames[t] + default: + return "Type" + strings.Title(typeNames[t]) + } +} + +// TypeInfo holds the information regarding field type. +// Used by complex types like JSON and Bytes. +type TypeInfo struct { + Type Type + Ident string + PkgPath string // import path. + PkgName string // local package name. + Nillable bool // slices or pointers. + RType *RType +} + +// String returns the string representation of a type. +func (t TypeInfo) String() string { + switch { + case t.Ident != "": + return t.Ident + case t.Type < endTypes: + return typeNames[t.Type] + default: + return typeNames[TypeInvalid] + } +} + +// Valid reports if the given type if known type. +func (t TypeInfo) Valid() bool { + return t.Type.Valid() +} + +// Numeric reports if the given type is a numeric type. +func (t TypeInfo) Numeric() bool { + return t.Type.Numeric() +} + +// ConstName returns the const name of the info type. +func (t TypeInfo) ConstName() string { + return t.Type.ConstName() +} + +// ValueScanner indicates if this type implements the ValueScanner interface. +func (t TypeInfo) ValueScanner() bool { + return t.RType.Implements(valueScannerType) +} + +// Validator indicates if this type implements the Validator interface. +func (t TypeInfo) Validator() bool { + return t.RType.Implements(validatorType) +} + +// Valuer indicates if this type implements the driver.Valuer interface. +func (t TypeInfo) Valuer() bool { + return t.RType.Implements(valuerType) +} + +// Comparable reports whether values of this type are comparable. +func (t TypeInfo) Comparable() bool { + switch t.Type { + case TypeBool, TypeTime, TypeUUID, TypeEnum, TypeString: + return true + case TypeOther: + // Always accept custom types as comparable on the database side. + // In the future, we should consider adding an interface to let + // custom types tell if they are comparable or not (see #1304). + return true + default: + return t.Numeric() + } +} + +var stringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + +// Stringer indicates if this type implements the Stringer interface. +func (t TypeInfo) Stringer() bool { + return t.RType.Implements(stringerType) +} + +var ( + typeNames = [...]string{ + TypeInvalid: "invalid", + TypeBool: "bool", + TypeTime: "time.Time", + TypeJSON: "json.RawMessage", + TypeUUID: "[16]byte", + TypeBytes: "[]byte", + TypeEnum: "string", + TypeString: "string", + TypeOther: "other", + TypeInt: "int", + TypeInt8: "int8", + TypeInt16: "int16", + TypeInt32: "int32", + TypeInt64: "int64", + TypeUint: "uint", + TypeUint8: "uint8", + TypeUint16: "uint16", + TypeUint32: "uint32", + TypeUint64: "uint64", + TypeFloat32: "float32", + TypeFloat64: "float64", + } + constNames = [...]string{ + TypeJSON: "TypeJSON", + TypeUUID: "TypeUUID", + TypeTime: "TypeTime", + TypeEnum: "TypeEnum", + TypeBytes: "TypeBytes", + TypeOther: "TypeOther", + } +) + +// RType holds a serializable reflect.Type information of +// Go object. Used by the entc package. +type RType struct { + Name string // reflect.Type.Name + Ident string // reflect.Type.String + Kind reflect.Kind + PkgPath string + Methods map[string]struct{ In, Out []*RType } + // Used only for in-package checks. 
+ rtype reflect.Type +} + +// TypeEqual reports if the underlying type is equal to the RType (after pointer indirections). +func (r *RType) TypeEqual(t reflect.Type) bool { + tv := indirect(t) + return r.Name == tv.Name() && r.Kind == t.Kind() && r.PkgPath == tv.PkgPath() +} + +// RType returns the string value of the indirect reflect.Type. +func (r *RType) String() string { + if r.rtype != nil { + return r.rtype.String() + } + return r.Ident +} + +// IsPtr reports if the reflect-type is a pointer type. +func (r *RType) IsPtr() bool { + return r != nil && r.Kind == reflect.Ptr +} + +// Implements reports whether the RType ~implements the given interface type. +func (r *RType) Implements(typ reflect.Type) bool { + if r == nil { + return false + } + n := typ.NumMethod() + for i := 0; i < n; i++ { + m0 := typ.Method(i) + m1, ok := r.Methods[m0.Name] + if !ok || len(m1.In) != m0.Type.NumIn() || len(m1.Out) != m0.Type.NumOut() { + return false + } + in := m0.Type.NumIn() + for j := 0; j < in; j++ { + if !m1.In[j].TypeEqual(m0.Type.In(j)) { + return false + } + } + out := m0.Type.NumOut() + for j := 0; j < out; j++ { + if !m1.Out[j].TypeEqual(m0.Type.Out(j)) { + return false + } + } + } + return true +} diff --git a/vendor/entgo.io/ent/schema/index/BUILD b/vendor/entgo.io/ent/schema/index/BUILD new file mode 100644 index 00000000..40db102b --- /dev/null +++ b/vendor/entgo.io/ent/schema/index/BUILD @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "index", + srcs = ["index.go"], + importmap = "go.resf.org/peridot/vendor/entgo.io/ent/schema/index", + importpath = "entgo.io/ent/schema/index", + visibility = ["//visibility:public"], + deps = ["//vendor/entgo.io/ent/schema"], +) diff --git a/vendor/entgo.io/ent/schema/index/index.go b/vendor/entgo.io/ent/schema/index/index.go new file mode 100644 index 00000000..23c15c6e --- /dev/null +++ b/vendor/entgo.io/ent/schema/index/index.go @@ -0,0 +1,121 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package index + +import "entgo.io/ent/schema" + +// A Descriptor for index configuration. +type Descriptor struct { + Unique bool // unique index. + Edges []string // edge columns. + Fields []string // field columns. + StorageKey string // custom index name. + Annotations []schema.Annotation // index annotations. +} + +// Builder for indexes on vertex columns and edges in the graph. +type Builder struct { + desc *Descriptor +} + +// Fields creates an index on the given vertex fields. +// Note that indexes are implemented only for SQL dialects, and does not support gremlin. +// +// func (T) Indexes() []ent.Index { +// +// // Unique index on 2 fields. +// index.Fields("first", "last"). +// Unique(), +// +// // Unique index of field under specific edge. +// index.Fields("name"). +// Edges("parent"). +// Unique(), +// +// } +// +func Fields(fields ...string) *Builder { + return &Builder{desc: &Descriptor{Fields: fields}} +} + +// Edges creates an index on the given vertex edge fields. +// Note that indexes are implemented only for SQL dialects, and does not support gremlin. +// +// func (T) Indexes() []ent.Index { +// +// // Unique index of field under 2 edges. +// index.Fields("name"). +// Edges("parent", "type"). 
+// Unique(), +// +// } +// +func Edges(edges ...string) *Builder { + return &Builder{desc: &Descriptor{Edges: edges}} +} + +// Fields sets the fields of the index. +// +// func (T) Indexes() []ent.Index { +// +// // Unique "name" and "age" fields under the "parent" edge. +// index.Edges("parent"). +// Fields("name", "age"). +// Unique(), +// +// } +func (b *Builder) Fields(fields ...string) *Builder { + b.desc.Fields = fields + return b +} + +// Edges sets the fields index to be unique under the set of edges (sub-graph). For example: +// +// func (T) Indexes() []ent.Index { +// +// // Unique "name" field under the "parent" edge. +// index.Fields("name"). +// Edges("parent"). +// Unique(), +// } +// +func (b *Builder) Edges(edges ...string) *Builder { + b.desc.Edges = edges + return b +} + +// Unique sets the index to be a unique index. +// Note that defining a uniqueness on optional fields won't prevent +// duplicates if one of the column contains NULL values. +func (b *Builder) Unique() *Builder { + b.desc.Unique = true + return b +} + +// StorageKey sets the storage key of the index. In SQL dialects, it's the index name. +func (b *Builder) StorageKey(key string) *Builder { + b.desc.StorageKey = key + return b +} + +// Annotations adds a list of annotations to the index object to be used by codegen extensions. +// +// func (T) Indexes() []ent.Index { +// +// // Partial index on name where the entity is not deleted. +// index.Fields("name"). +// Annotations(entsql.Prefix(100)) +// +// } +// +func (b *Builder) Annotations(annotations ...schema.Annotation) *Builder { + b.desc.Annotations = append(b.desc.Annotations, annotations...) + return b +} + +// Descriptor implements the ent.Descriptor interface. +func (b *Builder) Descriptor() *Descriptor { + return b.desc +} diff --git a/vendor/entgo.io/ent/schema/schema.go b/vendor/entgo.io/ent/schema/schema.go new file mode 100644 index 00000000..c893b9e3 --- /dev/null +++ b/vendor/entgo.io/ent/schema/schema.go @@ -0,0 +1,43 @@ +// Copyright 2019-present Facebook Inc. All rights reserved. +// This source code is licensed under the Apache 2.0 license found +// in the LICENSE file in the root directory of this source tree. + +package schema + +// Annotation is used to attach arbitrary metadata to the schema objects in codegen. +// The object must be serializable to JSON raw value (e.g. struct, map or slice). +// +// Template extensions can retrieve this metadata and use it inside their templates. +// Read more about it in ent website: https://entgo.io/docs/templates/#annotations. +type Annotation interface { + // Name defines the name of the annotation to be retrieved by the codegen. + Name() string +} + +// Merger wraps the single Merge function allows custom annotation to provide +// an implementation for merging 2 or more annotations from the same type. +// +// A common use case is where the same Annotation type is defined both in +// mixin.Schema and ent.Schema. +type Merger interface { + Merge(Annotation) Annotation +} + +// CommentAnnotation is a builtin schema annotation for +// configuring the schema's Godoc comment. +type CommentAnnotation struct { + Text string // Comment text. +} + +// Name implements the Annotation interface. +func (*CommentAnnotation) Name() string { + return "Comment" +} + +// Comment is a builtin schema annotation for +// configuring the schema's Godoc comment. 
+func Comment(text string) *CommentAnnotation { + return &CommentAnnotation{Text: text} +} + +var _ Annotation = (*CommentAnnotation)(nil) diff --git a/vendor/github.com/AppsFlyer/go-sundheit/.gitignore b/vendor/github.com/AppsFlyer/go-sundheit/.gitignore new file mode 100644 index 00000000..075312aa --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + + +*.iml +*.log +.idea +af-world +debug \ No newline at end of file diff --git a/vendor/github.com/AppsFlyer/go-sundheit/BUILD b/vendor/github.com/AppsFlyer/go-sundheit/BUILD new file mode 100644 index 00000000..a7e5fc96 --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/BUILD @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go-sundheit", + srcs = [ + "check.go", + "check_listener.go", + "check_task.go", + "config.go", + "health.go", + "health_listener.go", + "options.go", + "types.go", + "utils.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/AppsFlyer/go-sundheit", + importpath = "github.com/AppsFlyer/go-sundheit", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/pkg/errors"], +) diff --git a/vendor/github.com/AppsFlyer/go-sundheit/LICENSE b/vendor/github.com/AppsFlyer/go-sundheit/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/AppsFlyer/go-sundheit/Makefile b/vendor/github.com/AppsFlyer/go-sundheit/Makefile new file mode 100644 index 00000000..6abb6856 --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/Makefile @@ -0,0 +1,19 @@ +BIN_DIR := $(GOPATH)/bin +GOLANGCI_LINT := $(BIN_DIR)/golangci-lint + +all: build lint test + +build: + ##### building ##### + go build -v + +$(GOLANGCI_LINT): + GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.20.0 + +lint: $(GOLANGCI_LINT) + ##### linting ##### + golangci-lint run -E golint -E gosec -E gofmt + +test: build + ##### testing ##### + go test $(testflags) -v -race ./... 
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/README.md b/vendor/github.com/AppsFlyer/go-sundheit/README.md
new file mode 100644
index 00000000..0f9125d4
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/README.md
@@ -0,0 +1,405 @@
+# go-sundheit
+[![Actions Status](https://github.com/AppsFlyer/go-sundheit/workflows/go-build/badge.svg)](https://github.com/AppsFlyer/go-sundheit/actions)
+[![CircleCI](https://circleci.com/gh/AppsFlyer/go-sundheit.svg?style=svg)](https://circleci.com/gh/AppsFlyer/go-sundheit)
+[![Coverage Status](https://coveralls.io/repos/github/AppsFlyer/go-sundheit/badge.svg?branch=master)](https://coveralls.io/github/AppsFlyer/go-sundheit?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/AppsFlyer/go-sundheit)](https://goreportcard.com/report/github.com/AppsFlyer/go-sundheit)
+[![Godocs](https://img.shields.io/badge/golang-documentation-blue.svg)](https://godoc.org/github.com/AppsFlyer/go-sundheit)
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
+
+
+A library that provides support for defining service health for Go services.
+It allows you to register async health checks for your dependencies and the service itself,
+and provides a health endpoint that exposes their status.
+
+## What's go-sundheit?
+The project is named after the German word `Gesundheit`, which means ‘health’, and it is pronounced `/ɡəˈzʊntˌhaɪ̯t/`.
+
+## Installation
+Using go modules:
+```
+go get github.com/AppsFlyer/go-sundheit@v0.4.0
+```
+
+## Usage
+```go
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/AppsFlyer/go-sundheit"
+
+	healthhttp "github.com/AppsFlyer/go-sundheit/http"
+	"github.com/AppsFlyer/go-sundheit/checks"
+)
+
+func main() {
+	// create a new health instance
+	h := gosundheit.New()
+
+	// define an HTTP dependency check
+	httpCheckConf := checks.HTTPCheckConfig{
+		CheckName: "httpbin.url.check",
+		Timeout:   1 * time.Second,
+		// dependency you're checking - use your own URL here...
+		// this URL will fail 50% of the time
+		URL: "http://httpbin.org/status/200,300",
+	}
+	// create the HTTP check for the dependency
+	// fail fast if you misconfigured the URL. Don't ignore errors!!!
+	httpCheck, err := checks.NewHTTPCheck(httpCheckConf)
+	if err != nil {
+		fmt.Println(err)
+		return // your call...
+	}
+
+	// Alternatively panic when creating a check fails
+	httpCheck = checks.Must(checks.NewHTTPCheck(httpCheckConf))
+
+	err = h.RegisterCheck(
+		httpCheck,
+		gosundheit.InitialDelay(time.Second),         // the check will run once after 1 sec
+		gosundheit.ExecutionPeriod(10 * time.Second), // the check will be executed every 10 sec
+	)
+	if err != nil {
+		fmt.Println("Failed to register check: ", err)
+		return // or whatever
+	}
+
+	// define more checks...
+
+	// register a health endpoint
+	http.Handle("/admin/health.json", healthhttp.HandleHealthJSON(h))
+
+	// serve HTTP
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```
+### Using `Option` to Configure the `Health` Service
+Creating a health service is as simple as calling:
+```go
+gosundheit.New(options ...Option)
+```
+The optional `options` parameters allow the user to configure the health service by passing configuration functions (implementing the `Option` signature).
+All options are marked with the prefix `WithX`; a short wiring sketch follows the list below. Available options:
+- `WithCheckListeners` - enables you to act on check registration, start and completion events
+- `WithHealthListeners` - enables you to act on changes in the health service results
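+
+For example, assuming you already have listener implementations (both interfaces are described later in this README; `checkLogger` and `healthLogger` here are placeholder names, not part of the library), a minimal wiring sketch:
+```go
+// checkLogger and healthLogger are assumed to implement
+// gosundheit.CheckListener and gosundheit.HealthListener respectively.
+h := gosundheit.New(
+	gosundheit.WithCheckListeners(&checkLogger{}),
+	gosundheit.WithHealthListeners(&healthLogger{}),
+)
+```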
+
+### Built-in Checks
+The library comes with a set of built-in checks.
+Currently implemented checks are as follows:
+
+#### HTTP built-in check
+The HTTP check allows you to trigger an HTTP request to one of your dependencies,
+and verify the response status, and optionally the content of the response body.
+An example was given above in the [usage](#usage) section.
+
+#### DNS built-in check(s)
+The DNS checks allow you to perform a lookup of a given hostname / domain name / CNAME / etc,
+and validate that it resolves to at least the minimum number of required results.
+
+Creating a host lookup check is easy:
+```go
+// Schedule a host resolution check for `example.com`, requiring at least one result, and running every 10 sec
+h.RegisterCheck(
+	checks.NewHostResolveCheck("example.com", 1),
+	gosundheit.ExecutionPeriod(10 * time.Second),
+)
+```
+
+You may also use the lower-level `checks.NewResolveCheck`, specifying a custom `LookupFunc` if you want to perform other kinds of lookups.
+For example, you may register a reverse DNS lookup check like so:
+```go
+func ReverseDNSLookup(ctx context.Context, addr string) (resolvedCount int, err error) {
+	names, err := net.DefaultResolver.LookupAddr(ctx, addr)
+	resolvedCount = len(names)
+	return
+}
+
+//...
+
+h.RegisterCheck(
+	checks.NewResolveCheck(ReverseDNSLookup, "127.0.0.1", 3),
+	gosundheit.ExecutionPeriod(10 * time.Second),
+	gosundheit.ExecutionTimeout(1*time.Second),
+)
+```
+
+#### Ping built-in check(s)
+The ping checks allow you to verify that a resource is still alive and reachable.
+For example, you can use it as a DB ping check (`sql.DB` implements the Pinger interface):
+```go
+	db, err := sql.Open(...)
+	dbCheck, err := checks.NewPingCheck("db.check", db)
+	_ = h.RegisterCheck(
+		dbCheck,
+		// ...
+	)
+```
+
+You can also use the ping check to test a generic connection like so:
+```go
+	pinger := checks.NewDialPinger("tcp", "example.com")
+	pingCheck, err := checks.NewPingCheck("example.com.reachable", pinger)
+	h.RegisterCheck(pingCheck)
+```
+
+The `NewDialPinger` function supports all the network/address parameters supported by the `net.Dial()` function(s).
+
+### Custom Checks
+The library provides 2 means of defining a custom check.
+The bottom line is that you need an implementation of the `Check` interface:
+```go
+// Check is the API for defining health checks.
+// A valid check has a non-empty Name() and a check (Execute()) function.
+type Check interface {
+	// Name is the name of the check.
+	// Check names must be metric compatible.
+	Name() string
+	// Execute runs a single check, and returns an error when the check fails, and an optional details object.
+	// The function is expected to exit as soon as the provided Context is Done.
+	Execute(ctx context.Context) (details interface{}, err error)
+}
+```
+See examples in the following 2 sections below.
+
+#### Use the CustomCheck struct
+The `checks.CustomCheck` struct implements the `gosundheit.Check` interface,
+and is the simplest way to implement a check if all you need is to define a check function.
+
+Let's define a check function that fails 50% of the time:
+```go
+func lotteryCheck(ctx context.Context) (details interface{}, err error) {
+	lottery := rand.Float32()
+	details = fmt.Sprintf("lottery=%f", lottery)
+	if lottery < 0.5 {
+		err = errors.New("Sorry, I failed")
+	}
+	return
+}
+```
+
+Now we register the check to start running right away, and execute every 2 minutes with a timeout of 5 seconds:
+```go
+h := gosundheit.New()
+...
+
+h.RegisterCheck(
+	&checks.CustomCheck{
+		CheckName: "lottery.check",
+		CheckFunc: lotteryCheck,
+	},
+	gosundheit.InitialDelay(0),
+	gosundheit.ExecutionPeriod(2 * time.Minute),
+	gosundheit.ExecutionTimeout(5 * time.Second),
+)
+```
+
+#### Implement the Check interface
+Sometimes you need to define a more elaborate custom check,
+for example when you need to manage state.
+For these cases it's best to implement the `Check` interface yourself.
+
+Let's define a more flexible version of the lottery check that allows you to define the failure probability:
+```go
+type Lottery struct {
+	myname      string
+	probability float32
+}
+
+func (l Lottery) Execute(ctx context.Context) (details interface{}, err error) {
+	lottery := rand.Float32()
+	details = fmt.Sprintf("lottery=%f", lottery)
+	if lottery < l.probability {
+		err = errors.New("Sorry, I failed")
+	}
+	return
+}
+
+func (l Lottery) Name() string {
+	return l.myname
+}
+```
+
+And register our custom check, scheduling it to run every 30 seconds (after a 1-second initial delay) with a 5-second timeout:
+```go
+h := gosundheit.New()
+...
+
+h.RegisterCheck(
+	Lottery{myname: "custom.lottery.check", probability: 0.3},
+	gosundheit.InitialDelay(1*time.Second),
+	gosundheit.ExecutionPeriod(30*time.Second),
+	gosundheit.ExecutionTimeout(5*time.Second),
+)
+```
+
+#### Custom Checks Notes
+1. If a check takes longer than the specified rate period, the next execution will be delayed,
+but will not be executed concurrently.
+1. Checks must complete within a reasonable time. If a check doesn't complete or hangs,
+the next check execution will be delayed. Use proper timeouts.
+1. Checks must respect the provided context. Specifically, a check must abort its execution, and return an error, if the context has been cancelled (see the sketch after this list).
+1. **A health-check name must be a metric-name-compatible string**
+  (i.e. no funky characters and no spaces - just make it simple like `clicks-db-check`).
+  See here: https://help.datadoghq.com/hc/en-us/articles/203764705-What-are-valid-metric-names-
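+
+As a minimal sketch of the context requirement above, here is a hypothetical check function that aborts as soon as the provided context is done (`slowPing` is an assumed helper, not part of the library):
+```go
+func pingCheck(ctx context.Context) (details interface{}, err error) {
+	done := make(chan error, 1)
+	go func() { done <- slowPing() }() // assumed slow operation
+	select {
+	case <-ctx.Done():
+		// the timeout set via gosundheit.ExecutionTimeout has expired - abort
+		return nil, ctx.Err()
+	case err = <-done:
+		return "ping completed", err
+	}
+}
+```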
+
+### Expose Health Endpoint
+The library provides an HTTP handler function for serving health stats in JSON format.
+You can register it using your favorite HTTP implementation like so:
+```go
+http.Handle("/admin/health.json", healthhttp.HandleHealthJSON(h))
+```
+The endpoint can be called like so:
+```text
+~ $ curl -i http://localhost:8080/admin/health.json
+HTTP/1.1 503 Service Unavailable
+Content-Type: application/json
+Date: Tue, 22 Jan 2019 09:31:46 GMT
+Content-Length: 701
+
+{
+	"custom.lottery.check": {
+		"message": "lottery=0.206583",
+		"error": {
+			"message": "Sorry, I failed"
+		},
+		"timestamp": "2019-01-22T11:31:44.632415432+02:00",
+		"num_failures": 2,
+		"first_failure_time": "2019-01-22T11:31:41.632400256+02:00"
+	},
+	"lottery.check": {
+		"message": "lottery=0.865335",
+		"timestamp": "2019-01-22T11:31:44.63244047+02:00",
+		"num_failures": 0,
+		"first_failure_time": null
+	},
+	"url.check": {
+		"message": "http://httpbin.org/status/200,300",
+		"error": {
+			"message": "unexpected status code: '300' expected: '200'"
+		},
+		"timestamp": "2019-01-22T11:31:44.632442937+02:00",
+		"num_failures": 4,
+		"first_failure_time": "2019-01-22T11:31:38.632485339+02:00"
+	}
+}
+```
+Or for the shorter version:
+```text
+~ $ curl -i http://localhost:8080/admin/health.json?type=short
+HTTP/1.1 503 Service Unavailable
+Content-Type: application/json
+Date: Tue, 22 Jan 2019 09:40:19 GMT
+Content-Length: 105
+
+{
+	"custom.lottery.check": "PASS",
+	"lottery.check": "PASS",
+	"my.check": "FAIL",
+	"url.check": "PASS"
+}
+```
+
+The `short` response type is suitable for Consul health checks / LB health checks.
+
+The response code is `200` when the tests pass, and `503` when they fail.
+
+### CheckListener
+It is sometimes desired to keep track of check executions and apply custom logic.
+For example, you may want to add logging or external metrics to your checks,
+or trigger some recovery logic when a check fails 3 consecutive times.
+
+The `gosundheit.CheckListener` interface allows you to hook in this custom logic.
+
+For example, let's add a logging listener to our health repository:
+```go
+type checkEventsLogger struct{}
+
+func (l checkEventsLogger) OnCheckRegistered(name string, res gosundheit.Result) {
+	log.Printf("Check %q registered with initial result: %v\n", name, res)
+}
+
+func (l checkEventsLogger) OnCheckStarted(name string) {
+	log.Printf("Check %q started...\n", name)
+}
+
+func (l checkEventsLogger) OnCheckCompleted(name string, res gosundheit.Result) {
+	log.Printf("Check %q completed with result: %v\n", name, res)
+}
+```
+
+To register your listener:
+```go
+h := gosundheit.New(gosundheit.WithCheckListeners(&checkEventsLogger{}))
+```
+
+Please note that your `CheckListener` implementation must not block!
+
+### HealthListener
+It is sometimes desirable to track changes in registered check results.
+For example, you may want to log the number of results monitored, or send metrics on these results.
+
+The `gosundheit.HealthListener` interface allows you to hook in this custom logic.
+
+For example, let's add a logging listener:
+```go
+type healthLogger struct{}
+
+func (l healthLogger) OnResultsUpdated(results map[string]Result) {
+	log.Printf("There are %d results, general health is %t\n", len(results), allHealthy(results))
+}
+```
+
+To register your listener:
+```go
+h := gosundheit.New(gosundheit.WithHealthListeners(&healthLogger{}))
+```
+
+## Metrics
+The library can expose metrics using a `CheckListener`. At the moment, OpenCensus is available and exposes the following metrics:
+* `health/check_status_by_name` - An aggregated health status gauge (0/1 for fail/pass) at the time of sampling.
+The aggregation uses the following tags:
+  * `check=allChecks` - all checks aggregation
+  * `check=<check-name>` - specific check aggregation
+* `health/check_count_by_name_and_status` - Aggregated pass/fail counts for checks, with the following tags:
+  * `check=allChecks` - all checks aggregation
+  * `check=<check-name>` - specific check aggregation
+  * `check-passing=[true|false]`
+* `health/executeTime` - The time it took to execute a check. Uses the following tag:
+  * `check=<check-name>` - specific check aggregation
+
+The views can be registered like so:
+```go
+import (
+	"github.com/AppsFlyer/go-sundheit"
+	"github.com/AppsFlyer/go-sundheit/opencensus"
+	"go.opencensus.io/stats/view"
+)
+
+// This listener can act both as a check listener and as a health listener for reporting metrics
+oc := opencensus.NewMetricsListener()
+h := gosundheit.New(gosundheit.WithCheckListeners(oc), gosundheit.WithHealthListeners(oc))
+// ...
+view.Register(opencensus.DefaultHealthViews...)
+// or register individual views. For example:
+view.Register(opencensus.ViewCheckExecutionTime, opencensus.ViewCheckStatusByName, ...)
+```
+
+### Classification
+
+It is sometimes required to report metrics for different check types (e.g. setup, liveness, readiness).
+To report metrics using a `classification` tag, initialize the OpenCensus listener with a classification:
+
+```go
+// startup
+opencensus.NewMetricsListener(opencensus.WithStartupClassification())
+// liveness
+opencensus.NewMetricsListener(opencensus.WithLivenessClassification())
+// readiness
+opencensus.NewMetricsListener(opencensus.WithReadinessClassification())
+// custom
+opencensus.NewMetricsListener(opencensus.WithClassification("custom"))
+```
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/check.go b/vendor/github.com/AppsFlyer/go-sundheit/check.go
new file mode 100644
index 00000000..1edb70f7
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/check.go
@@ -0,0 +1,14 @@
+package gosundheit
+
+import "context"
+
+// Check is the API for defining health checks.
+// A valid check has a non-empty Name() and a check (Execute()) function.
+type Check interface {
+	// Name is the name of the check.
+	// Check names must be metric compatible.
+	Name() string
+	// Execute runs a single check, and returns an error when the check fails, and an optional details object.
+	// The function is expected to exit as soon as the provided Context is Done.
+	Execute(ctx context.Context) (details interface{}, err error)
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/check_listener.go b/vendor/github.com/AppsFlyer/go-sundheit/check_listener.go
new file mode 100644
index 00000000..14be6e4d
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/check_listener.go
@@ -0,0 +1,39 @@
+package gosundheit
+
+// CheckListener can be used to gain check stats or log check transitions.
+// Implementations of this interface **must not block!**
+// If an implementation blocks, it may result in delayed execution of other health checks down the line.
+// It's OK to log in the implementation and it's OK to add metrics, but it's not OK to run anything that
+// takes a long time to complete, such as network IO etc.
+type CheckListener interface {
+	// OnCheckRegistered is called when the check with the specified name has been registered.
+	// Result argument is for reporting the first run state of the check
+	OnCheckRegistered(name string, result Result)
+
+	// OnCheckStarted is called when a check with the specified name has started
+	OnCheckStarted(name string)
+
+	// OnCheckCompleted is called when the check with the specified name has completed its execution.
+	// The results are passed as an argument
+	OnCheckCompleted(name string, result Result)
+}
+
+type CheckListeners []CheckListener
+
+func (c CheckListeners) OnCheckRegistered(name string, result Result) {
+	for _, listener := range c {
+		listener.OnCheckRegistered(name, result)
+	}
+}
+
+func (c CheckListeners) OnCheckStarted(name string) {
+	for _, listener := range c {
+		listener.OnCheckStarted(name)
+	}
+}
+
+func (c CheckListeners) OnCheckCompleted(name string, result Result) {
+	for _, listener := range c {
+		listener.OnCheckCompleted(name, result)
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/check_task.go b/vendor/github.com/AppsFlyer/go-sundheit/check_task.go
new file mode 100644
index 00000000..2d44aaeb
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/check_task.go
@@ -0,0 +1,36 @@
+package gosundheit
+
+import (
+	"context"
+	"time"
+)
+
+type checkTask struct {
+	stopChan chan bool
+	ticker   *time.Ticker
+	check    Check
+	timeout  time.Duration
+}
+
+func (t *checkTask) stop() {
+	if t.ticker != nil {
+		t.ticker.Stop()
+	}
+}
+
+func (t *checkTask) execute(ctx context.Context) (details interface{}, duration time.Duration, err error) {
+	timeoutCtx, cancel := contextWithTimeout(ctx, t.timeout)
+	defer cancel()
+	startTime := time.Now()
+	details, err = t.check.Execute(timeoutCtx)
+	duration = time.Since(startTime)
+
+	return
+}
+
+func contextWithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) {
+	if t <= 0 {
+		return context.WithCancel(parent)
+	}
+	return context.WithTimeout(parent, t)
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/BUILD b/vendor/github.com/AppsFlyer/go-sundheit/checks/BUILD
new file mode 100644
index 00000000..0ad46844
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/BUILD
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "checks",
+    srcs = [
+        "custom.go",
+        "dns.go",
+        "http.go",
+        "must.go",
+        "ping.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/AppsFlyer/go-sundheit/checks",
+    importpath = "github.com/AppsFlyer/go-sundheit/checks",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/AppsFlyer/go-sundheit",
+        "//vendor/github.com/pkg/errors",
+    ],
+)
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/custom.go b/vendor/github.com/AppsFlyer/go-sundheit/checks/custom.go
new file mode 100644
index 00000000..a99b205b
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/custom.go
@@ -0,0 +1,32 @@
+package checks
+
+import (
+	"context"
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+)
+
+// CustomCheck is a simple Check implementation if all you need is a functional check
+type CustomCheck struct {
+	// CheckName is the name of the check.
+	CheckName string
+	// CheckFunc is a function that runs a single check, and returns an error when the check fails, and an optional details object.
+	CheckFunc func(ctx context.Context) (details interface{}, err error)
+}
+
+var _ gosundheit.Check = (*CustomCheck)(nil)
+
+// Name is the name of the check.
+// Check names must be metric compatible.
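+// In practice this means a plain dotted name (letters, digits, dots and underscores) that can be used as a metric tag value.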
+func (check *CustomCheck) Name() string {
+	return check.CheckName
+}
+
+// Execute runs the given CheckFunc, and returns its output.
+func (check *CustomCheck) Execute(ctx context.Context) (details interface{}, err error) {
+	if check.CheckFunc == nil {
+		return "Unimplemented check", nil
+	}
+
+	return check.CheckFunc(ctx)
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/dns.go b/vendor/github.com/AppsFlyer/go-sundheit/checks/dns.go
new file mode 100644
index 00000000..d74a4e0b
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/dns.go
@@ -0,0 +1,52 @@
+package checks
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/pkg/errors"
+
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+)
+
+// NewHostResolveCheck returns a gosundheit.Check that makes sure the provided host can resolve
+// to at least `minRequiredResults` IP addresses within the timeout specified by the provided context.
+func NewHostResolveCheck(host string, minRequiredResults int) gosundheit.Check {
+	return NewResolveCheck(NewHostLookup(nil), host, minRequiredResults)
+}
+
+// LookupFunc is a function that is used for looking up something (in DNS) and returns the resolved results count, and a possible error
+type LookupFunc func(ctx context.Context, lookFor string) (resolvedCount int, err error)
+
+// NewResolveCheck returns a gosundheit.Check that makes sure the `resolveThis` arg can be resolved using the `lookupFn`
+// to at least `minRequiredResults` results, within the timeout specified by the provided context.
+func NewResolveCheck(lookupFn LookupFunc, resolveThis string, minRequiredResults int) gosundheit.Check {
+	return &CustomCheck{
+		CheckName: "resolve." + resolveThis,
+		CheckFunc: func(ctx context.Context) (details interface{}, err error) {
+			resolvedCount, err := lookupFn(ctx, resolveThis)
+			details = fmt.Sprintf("[%d] results were resolved", resolvedCount)
+			if err != nil {
+				return
+			}
+			if resolvedCount < minRequiredResults {
+				err = errors.Errorf("[%s] lookup returned %d results, but requires at least %d", resolveThis, resolvedCount, minRequiredResults)
+			}
+			return
+		},
+	}
+}
+
+// NewHostLookup creates a LookupFunc that looks up host addresses
+func NewHostLookup(resolver *net.Resolver) LookupFunc {
+	if resolver == nil {
+		resolver = net.DefaultResolver
+	}
+
+	return func(ctx context.Context, host string) (resolvedCount int, err error) {
+		addrs, err := resolver.LookupHost(ctx, host)
+		resolvedCount = len(addrs)
+		return
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/http.go b/vendor/github.com/AppsFlyer/go-sundheit/checks/http.go
new file mode 100644
index 00000000..f42c3a0a
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/http.go
@@ -0,0 +1,146 @@
+package checks
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+	"github.com/pkg/errors"
+)
+
+// HTTPCheckConfig configures a check for the response from a given URL.
+// The only required field is `URL`, which must be a valid URL.
+type HTTPCheckConfig struct {
+	// CheckName is the health check name - must be a valid metric name.
+	// CheckName is required
+	CheckName string
+	// URL is a required valid URL, to be called by the check
+	URL string
+	// Method is the HTTP method to use for this check.
+	// Method is optional and defaults to `GET` if undefined.
+	Method string
+	// Body is an optional request body to be posted to the target URL.
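+	// The provider is called for every check execution, so each request gets a fresh reader.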
+	Body BodyProvider
+	// ExpectedStatus is the expected response status code, defaults to `200`.
+	ExpectedStatus int
+	// ExpectedBody is optional; if defined, operates as a basic "body should contain <expected body>".
+	ExpectedBody string
+	// Client is optional; if undefined, a new client will be created using "Timeout".
+	Client *http.Client
+	// Timeout is the timeout used for the HTTP request, defaults to "1s".
+	Timeout time.Duration
+	// Options allow you to configure the HTTP request with arbitrary settings, e.g. add request headers, etc.
+	Options []RequestOption
+}
+
+// RequestOption configures the request with arbitrary settings, e.g. add request headers, etc.
+type RequestOption func(r *http.Request)
+
+type httpCheck struct {
+	config         *HTTPCheckConfig
+	successDetails string
+}
+
+// BodyProvider allows the user to provide a body to the HTTP checks, e.g. for posting a payload as a check.
+type BodyProvider func() io.Reader
+
+// NewHTTPCheck creates a new http check defined by the given config
+func NewHTTPCheck(config HTTPCheckConfig) (check gosundheit.Check, err error) {
+	if config.URL == "" {
+		return nil, errors.Errorf("URL must not be empty")
+	}
+	_, err = url.Parse(config.URL)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	if config.CheckName == "" {
+		return nil, errors.Errorf("CheckName must not be empty")
+	}
+
+	if config.ExpectedStatus == 0 {
+		config.ExpectedStatus = http.StatusOK
+	}
+	if config.Method == "" {
+		config.Method = http.MethodGet
+	}
+	if config.Body == nil {
+		config.Body = func() io.Reader { return http.NoBody }
+	}
+	if config.Timeout == 0 {
+		config.Timeout = time.Second
+	}
+	if config.Client == nil {
+		config.Client = &http.Client{}
+	}
+	config.Client.Timeout = config.Timeout
+
+	check = &httpCheck{
+		config:         &config,
+		successDetails: fmt.Sprintf("URL [%s] is accessible", config.URL),
+	}
+	return check, nil
+}
+
+func (check *httpCheck) Name() string {
+	return check.config.CheckName
+}
+
+func (check *httpCheck) Execute(ctx context.Context) (details interface{}, err error) {
+	details = check.config.URL
+	resp, err := check.fetchURL(ctx)
+	if err != nil {
+		return details, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != check.config.ExpectedStatus {
+		return details, errors.Errorf("unexpected status code: '%v' expected: '%v'",
+			resp.StatusCode, check.config.ExpectedStatus)
+	}
+
+	if check.config.ExpectedBody != "" {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return details, errors.Errorf("failed to read response body: %v", err)
+		}
+
+		if !strings.Contains(string(body), check.config.ExpectedBody) {
+			return details, errors.Errorf("body does not contain expected content '%v'", check.config.ExpectedBody)
+		}
+	}
+
+	return check.successDetails, nil
+
+}
+
+// fetchURL executes the HTTP request to the target URL, and returns an `http.Response` and an error.
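+// Any configured RequestOptions are applied to the request before it is sent.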
+// It is the caller's responsibility to close the response body
+func (check *httpCheck) fetchURL(ctx context.Context) (*http.Response, error) {
+	req, err := http.NewRequestWithContext(ctx, check.config.Method, check.config.URL, check.config.Body())
+	if err != nil {
+		return nil, errors.Errorf("unable to create check HTTP request: %v", err)
+	}
+
+	configureHTTPOptions(req, check.config.Options)
+
+	resp, err := check.config.Client.Do(req)
+	if err != nil {
+		return nil, errors.Errorf("failed to execute '%v' request: %v", check.config.Method, err)
+	}
+
+	return resp, nil
+}
+
+func configureHTTPOptions(req *http.Request, options []RequestOption) {
+	for _, opt := range options {
+		opt(req)
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/must.go b/vendor/github.com/AppsFlyer/go-sundheit/checks/must.go
new file mode 100644
index 00000000..95f69102
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/must.go
@@ -0,0 +1,14 @@
+package checks
+
+import gosundheit "github.com/AppsFlyer/go-sundheit"
+
+// Must is a helper that wraps a call to a function returning (gosundheit.Check, error) and panics if the error is non-nil.
+// It is intended for use in check initializations such as
+// c := checks.Must(checks.NewHTTPCheck(/*...*/))
+func Must(check gosundheit.Check, err error) gosundheit.Check {
+	if err != nil {
+		panic(err)
+	}
+
+	return check
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/checks/ping.go b/vendor/github.com/AppsFlyer/go-sundheit/checks/ping.go
new file mode 100644
index 00000000..710fd78b
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/checks/ping.go
@@ -0,0 +1,51 @@
+package checks
+
+import (
+	"context"
+	"net"
+
+	"github.com/pkg/errors"
+
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+)
+
+// Pinger verifies a resource is still alive.
+// This would normally be a TCP dial check, a db.PingContext() or something similar.
+type Pinger interface {
+	PingContext(ctx context.Context) error
+}
+
+// PingContextFunc type is an adapter to allow the use of ordinary functions as Pingers.
+type PingContextFunc func(ctx context.Context) error
+
+// PingContext calls f(ctx).
+func (f PingContextFunc) PingContext(ctx context.Context) error {
+	return f(ctx)
+}
+
+// NewPingCheck returns a Check that pings using the specified Pinger and fails on context cancellation or ping failure
+func NewPingCheck(name string, pinger Pinger) (gosundheit.Check, error) {
+	if pinger == nil {
+		return nil, errors.New("Pinger must not be nil")
+	}
+
+	return &CustomCheck{
+		CheckName: name,
+		CheckFunc: func(ctx context.Context) (details interface{}, err error) {
+			return nil, pinger.PingContext(ctx)
+		},
+	}, nil
+}
+
+// NewDialPinger returns a Pinger that pings the specified address
+func NewDialPinger(network, address string) PingContextFunc {
+	var d net.Dialer
+	return func(ctx context.Context) error {
+		conn, err := d.DialContext(ctx, network, address)
+		if err == nil {
+			_ = conn.Close()
+		}
+
+		return err
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/config.go b/vendor/github.com/AppsFlyer/go-sundheit/config.go
new file mode 100644
index 00000000..3ca18c1f
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/config.go
@@ -0,0 +1,22 @@
+package gosundheit
+
+import (
+	"time"
+)
+
+// checkConfig configures a health Check and its scheduling timing requirements.
+type checkConfig struct {
+	// executionPeriod is the period between successive executions.
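+	// If a check run outlasts the period, subsequent runs are delayed (ticker ticks are not queued).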
+	executionPeriod time.Duration
+
+	// initialDelay is the time to delay first execution; defaults to zero.
+	initialDelay time.Duration
+
+	// initiallyPassing indicates that, when true, the check will be treated as passing before the first run; defaults to false
+	initiallyPassing bool
+
+	// executionTimeout is the maximum allowed execution time for a check. If this timeout is exceeded, the provided Context will be cancelled.
+	// defaults to no timeout.
+	executionTimeout time.Duration
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/health.go b/vendor/github.com/AppsFlyer/go-sundheit/health.go
new file mode 100644
index 00000000..da41f642
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/health.go
@@ -0,0 +1,244 @@
+package gosundheit
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// Health is the API for registering / deregistering health checks, and for fetching the health check results.
+type Health interface {
+	// RegisterCheck registers a health check according to the given configuration.
+	// Once RegisterCheck() is called, the check is scheduled to run in its own goroutine.
+	// Callers must make sure the checks complete within a reasonable time frame, or the next execution will be delayed.
+	RegisterCheck(check Check, opts ...CheckOption) error
+	// Deregister removes a health check from this instance, and stops its next executions.
+	// If the check is running while Deregister() is called, the check may complete its current execution.
+	// Once a check is removed, its results are no longer returned.
+	Deregister(name string)
+	// Results returns a snapshot of the health checks' execution results at the time of calling, and the current health.
+	// A system is considered healthy iff all checks are passing.
+	Results() (results map[string]Result, healthy bool)
+	// IsHealthy returns the current health of the system.
+	// A system is considered healthy iff all checks are passing.
+	IsHealthy() bool
+	// DeregisterAll removes all health checks from this instance, and stops their next executions.
+	// It is equivalent to calling Deregister() for each currently registered check.
+	DeregisterAll()
+}
+
+// New returns a new Health instance.
+func New(opts ...HealthOption) Health {
+	h := &health{
+		ctx:        context.TODO(),
+		results:    make(map[string]Result, maxExpectedChecks),
+		checkTasks: make(map[string]checkTask, maxExpectedChecks),
+	}
+	for _, opt := range append(opts, WithDefaults()) {
+		opt.apply(h)
+	}
+
+	return h
+}
+
+type health struct {
+	ctx            context.Context
+	results        map[string]Result
+	checkTasks     map[string]checkTask
+	checksListener CheckListeners
+	healthListener HealthListeners
+	lock           sync.RWMutex
+
+	// Check config defaults
+	defaultExecutionPeriod  time.Duration
+	defaultInitialDelay     time.Duration
+	defaultInitiallyPassing bool
+}
+
+func (h *health) RegisterCheck(check Check, opts ...CheckOption) error {
+	if check == nil {
+		return errors.New("check must not be nil")
+	}
+	if check.Name() == "" {
+		return errors.New("check name must not be empty")
+	}
+
+	cfg := h.initCheckConfig(opts)
+
+	if cfg.executionPeriod <= 0 {
+		return errors.New("execution period must be greater than 0")
+	}
+
+	// checks are initially failing by default, but we allow overrides...
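+	// via the InitiallyPassing option (see options.go).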
+	var initialErr error
+	if !cfg.initiallyPassing {
+		initialErr = fmt.Errorf(initialResultMsg)
+	}
+
+	result := h.updateResult(check.Name(), initialResultMsg, 0, initialErr, time.Now())
+	h.checksListener.OnCheckRegistered(check.Name(), result)
+	h.scheduleCheck(h.createCheckTask(check, cfg.executionTimeout), cfg.initialDelay, cfg.executionPeriod)
+	return nil
+}
+
+func (h *health) initCheckConfig(opts []CheckOption) checkConfig {
+	cfg := checkConfig{
+		executionPeriod:  h.defaultExecutionPeriod,
+		initialDelay:     h.defaultInitialDelay,
+		initiallyPassing: h.defaultInitiallyPassing,
+	}
+
+	for _, opt := range opts {
+		opt.applyCheck(&cfg)
+	}
+
+	return cfg
+}
+
+func (h *health) createCheckTask(check Check, timeout time.Duration) *checkTask {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+
+	task := checkTask{
+		stopChan: make(chan bool, 1),
+		check:    check,
+		timeout:  timeout,
+	}
+	h.checkTasks[check.Name()] = task
+
+	return &task
+}
+
+func (h *health) stopCheckTask(name string) {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+
+	task := h.checkTasks[name]
+
+	task.stop()
+
+	delete(h.results, name)
+	delete(h.checkTasks, name)
+}
+
+func (h *health) scheduleCheck(task *checkTask, initialDelay, executionPeriod time.Duration) {
+	go func() {
+		// initial execution
+		if !h.runCheckOrStop(task, time.After(initialDelay)) {
+			return
+		}
+		h.reportResults()
+		// scheduled recurring execution
+		task.ticker = time.NewTicker(executionPeriod)
+		for {
+			if !h.runCheckOrStop(task, task.ticker.C) {
+				return
+			}
+			h.reportResults()
+		}
+	}()
+}
+
+func (h *health) reportResults() {
+	h.lock.RLock()
+	resultsCopy := copyResultsMap(h.results)
+	h.lock.RUnlock()
+	h.healthListener.OnResultsUpdated(resultsCopy)
+}
+
+func (h *health) runCheckOrStop(task *checkTask, timerChan <-chan time.Time) bool {
+	select {
+	case <-task.stopChan:
+		h.stopCheckTask(task.check.Name())
+		return false
+	case t := <-timerChan:
+		h.checkAndUpdateResult(task, t)
+		return true
+	}
+}
+
+func (h *health) checkAndUpdateResult(task *checkTask, checkTime time.Time) {
+	h.checksListener.OnCheckStarted(task.check.Name())
+	details, duration, err := task.execute(h.ctx)
+	result := h.updateResult(task.check.Name(), details, duration, err, checkTime)
+	h.checksListener.OnCheckCompleted(task.check.Name(), result)
+}
+
+func (h *health) Deregister(name string) {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+
+	task, ok := h.checkTasks[name]
+	if ok {
+		// actual cleanup happens in the task goroutine
+		task.stopChan <- true
+	}
+}
+
+func (h *health) DeregisterAll() {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+
+	for _, task := range h.checkTasks {
+		task.stopChan <- true
+	}
+}
+
+func (h *health) Results() (results map[string]Result, healthy bool) {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+
+	results = make(map[string]Result, len(h.results))
+
+	healthy = true
+	for k, v := range h.results {
+		results[k] = v
+		healthy = healthy && v.IsHealthy()
+	}
+
+	return
+}
+
+func (h *health) IsHealthy() (healthy bool) {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+
+	return allHealthy(h.results)
+}
+
+func (h *health) updateResult(
+	name string, details interface{}, checkDuration time.Duration, err error, t time.Time) (result Result) {
+
+	h.lock.Lock()
+	defer h.lock.Unlock()
+
+	prevResult, ok := h.results[name]
+	result = Result{
+		Details:            details,
+		Error:              newMarshalableError(err),
+		Timestamp:          t,
+		Duration:           checkDuration,
+		TimeOfFirstFailure: nil,
+	}
+
+	if !result.IsHealthy() {
+		if ok {
+			result.ContiguousFailures = prevResult.ContiguousFailures + 1
+			if prevResult.IsHealthy() {
+				result.TimeOfFirstFailure = &t
+			} else {
+				result.TimeOfFirstFailure = prevResult.TimeOfFirstFailure
+			}
+		} else {
+			result.ContiguousFailures = 1
+			result.TimeOfFirstFailure = &t
+		}
+	}
+
+	h.results[name] = result
+	return result
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/health_listener.go b/vendor/github.com/AppsFlyer/go-sundheit/health_listener.go
new file mode 100644
index 00000000..22ee1a25
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/health_listener.go
@@ -0,0 +1,13 @@
+package gosundheit
+
+type HealthListener interface {
+	OnResultsUpdated(results map[string]Result)
+}
+
+type HealthListeners []HealthListener
+
+func (h HealthListeners) OnResultsUpdated(results map[string]Result) {
+	for _, listener := range h {
+		listener.OnResultsUpdated(results)
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/http/BUILD b/vendor/github.com/AppsFlyer/go-sundheit/http/BUILD
new file mode 100644
index 00000000..1130aa74
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/http/BUILD
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "http",
+    srcs = ["handler.go"],
+    importmap = "go.resf.org/peridot/vendor/github.com/AppsFlyer/go-sundheit/http",
+    importpath = "github.com/AppsFlyer/go-sundheit/http",
+    visibility = ["//visibility:public"],
+    deps = ["//vendor/github.com/AppsFlyer/go-sundheit"],
+)
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/http/handler.go b/vendor/github.com/AppsFlyer/go-sundheit/http/handler.go
new file mode 100644
index 00000000..7de0b33e
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/http/handler.go
@@ -0,0 +1,49 @@
+package http
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/AppsFlyer/go-sundheit"
+)
+
+const (
+	// ReportTypeShort is the value to be passed in the request parameter `type` when a short response is desired.
+	ReportTypeShort = "short"
+)
+
+// HandleHealthJSON returns an http.HandlerFunc that can be used as an endpoint that exposes the service health
+func HandleHealthJSON(h gosundheit.Health) http.HandlerFunc {
+	return func(w http.ResponseWriter, request *http.Request) {
+		results, healthy := h.Results()
+		w.Header().Set("Content-Type", "application/json")
+		if healthy {
+			w.WriteHeader(200)
+		} else {
+			w.WriteHeader(503)
+		}
+
+		encoder := json.NewEncoder(w)
+		encoder.SetIndent("", "\t")
+		var err error
+		if request.URL.Query().Get("type") == ReportTypeShort {
+			shortResults := make(map[string]string)
+			for k, v := range results {
+				if v.IsHealthy() {
+					shortResults[k] = "PASS"
+				} else {
+					shortResults[k] = "FAIL"
+				}
+			}
+
+			err = encoder.Encode(shortResults)
+		} else {
+			err = encoder.Encode(results)
+		}
+
+		if err != nil {
+			_, _ = fmt.Fprintf(w, "Failed to render results JSON: %s", err)
+		}
+	}
+}
diff --git a/vendor/github.com/AppsFlyer/go-sundheit/options.go b/vendor/github.com/AppsFlyer/go-sundheit/options.go
new file mode 100644
index 00000000..9014cd24
--- /dev/null
+++ b/vendor/github.com/AppsFlyer/go-sundheit/options.go
@@ -0,0 +1,121 @@
+package gosundheit
+
+import (
+	"time"
+)
+
+// HealthOption configures a health checker using the functional options paradigm
+// popularized by Rob Pike and Dave Cheney.
+// If you're unfamiliar with this style, see:
+// - https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html
+// - https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
+// - https://sagikazarmark.hu/blog/functional-options-on-steroids/
+type HealthOption interface {
+	apply(*health)
+}
+
+type healthOptionFunc func(*health)
+
+func (fn healthOptionFunc) apply(h *health) {
+	fn(h)
+}
+
+// WithCheckListeners allows you to listen to check start/end events
+func WithCheckListeners(listener ...CheckListener) HealthOption {
+	return healthOptionFunc(func(h *health) {
+		h.checksListener = listener
+	})
+}
+
+// WithHealthListeners allows you to listen to overall results changes
+func WithHealthListeners(listener ...HealthListener) HealthOption {
+	return healthOptionFunc(func(h *health) {
+		h.healthListener = listener
+	})
+}
+
+// WithDefaults sets all the Health object settings to their defaults. It's not required to use this, as the defaults are applied even when no options are given.
+// This is a simple placeholder for any future defaults.
+func WithDefaults() HealthOption {
+	return healthOptionFunc(func(h *health) {})
+}
+
+// CheckOption configures a health check using the functional options paradigm
+// popularized by Rob Pike and Dave Cheney.
+// If you're unfamiliar with this style, see:
+// - https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html
+// - https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
+// - https://sagikazarmark.hu/blog/functional-options-on-steroids/
+type CheckOption interface {
+	applyCheck(*checkConfig)
+}
+
+// Option configures a health checker or a health check using the functional options paradigm
+// popularized by Rob Pike and Dave Cheney.
+// If you're unfamiliar with this style, see:
+// - https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html
+// - https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
+// - https://sagikazarmark.hu/blog/functional-options-on-steroids/
+type Option interface {
+	HealthOption
+	CheckOption
+}
+
+type executionPeriod time.Duration
+
+func (o executionPeriod) apply(h *health) {
+	h.defaultExecutionPeriod = time.Duration(o)
+}
+
+func (o executionPeriod) applyCheck(c *checkConfig) {
+	c.executionPeriod = time.Duration(o)
+}
+
+// ExecutionPeriod is the period between successive executions.
+func ExecutionPeriod(d time.Duration) Option {
+	return executionPeriod(d)
+}
+
+type initialDelay time.Duration
+
+func (o initialDelay) apply(h *health) {
+	h.defaultInitialDelay = time.Duration(o)
+}
+
+func (o initialDelay) applyCheck(c *checkConfig) {
+	c.initialDelay = time.Duration(o)
+}
+
+// InitialDelay is the time to delay first execution; defaults to zero.
+func InitialDelay(d time.Duration) Option {
+	return initialDelay(d)
+}
+
+type initiallyPassing bool
+
+func (o initiallyPassing) apply(h *health) {
+	h.defaultInitiallyPassing = bool(o)
+}
+
+func (o initiallyPassing) applyCheck(c *checkConfig) {
+	c.initiallyPassing = bool(o)
+}
+
+// InitiallyPassing indicates that, when true, the check will be treated as passing before the first run; defaults to false
+func InitiallyPassing(b bool) Option {
+	return initiallyPassing(b)
+}
+
+type executionTimeout time.Duration
+
+func (o executionTimeout) applyCheck(c *checkConfig) {
+	c.executionTimeout = time.Duration(o)
+}
+
+// ExecutionTimeout sets the timeout of the check.
+// It is up to the check to respect the timeout, which is provided via the Context argument of the `Check.Execute` method.
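+// Note that a check that ignores the provided Context will simply keep running; the library does not forcibly abort it.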
+// Defaults to no timeout +func ExecutionTimeout(d time.Duration) CheckOption { + return executionTimeout(d) +} diff --git a/vendor/github.com/AppsFlyer/go-sundheit/types.go b/vendor/github.com/AppsFlyer/go-sundheit/types.go new file mode 100644 index 00000000..ed31bf1d --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/types.go @@ -0,0 +1,67 @@ +package gosundheit + +import ( + "fmt" + "time" + + "github.com/pkg/errors" +) + +const ( + maxExpectedChecks = 16 + initialResultMsg = "didn't run yet" + // ValAllChecks is the value used for the check tags when tagging all tests + ValAllChecks = "all_checks" +) + +// Result represents the output of a health check execution. +type Result struct { + // the details of task Result - may be nil + Details interface{} `json:"message,omitempty"` + // the error returned from a failed health check - nil when successful + Error error `json:"error,omitempty"` + // the time of the last health check + Timestamp time.Time `json:"timestamp"` + // the execution duration of the last check + Duration time.Duration `json:"duration,omitempty"` + // the number of failures that occurred in a row + ContiguousFailures int64 `json:"contiguousFailures"` + // the time of the initial transitional failure + TimeOfFirstFailure *time.Time `json:"timeOfFirstFailure"` +} + +// IsHealthy returns true iff the check result snapshot was a success +func (r Result) IsHealthy() bool { + return r.Error == nil +} + +func (r Result) String() string { + return fmt.Sprintf("Result{details: %s, err: %s, time: %s, contiguousFailures: %d, timeOfFirstFailure:%s}", + r.Details, r.Error, r.Timestamp, r.ContiguousFailures, r.TimeOfFirstFailure) +} + +type marshalableError struct { + Message string `json:"message,omitempty"` + Cause error `json:"cause,omitempty"` +} + +func newMarshalableError(err error) error { + if err == nil { + return nil + } + + mr := &marshalableError{ + Message: err.Error(), + } + + cause := errors.Cause(err) + if cause != err { + mr.Cause = newMarshalableError(cause) + } + + return mr +} + +func (e *marshalableError) Error() string { + return e.Message +} diff --git a/vendor/github.com/AppsFlyer/go-sundheit/utils.go b/vendor/github.com/AppsFlyer/go-sundheit/utils.go new file mode 100644 index 00000000..444995f2 --- /dev/null +++ b/vendor/github.com/AppsFlyer/go-sundheit/utils.go @@ -0,0 +1,19 @@ +package gosundheit + +func allHealthy(results map[string]Result) (healthy bool) { + for _, v := range results { + if !v.IsHealthy() { + return false + } + } + + return true +} + +func copyResultsMap(results map[string]Result) map[string]Result { + newMap := make(map[string]Result, len(results)) + for k, v := range results { + newMap[k] = v + } + return newMap +} diff --git a/vendor/github.com/Azure/go-ntlmssp/.travis.yml b/vendor/github.com/Azure/go-ntlmssp/.travis.yml new file mode 100644 index 00000000..23c95fe9 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/.travis.yml @@ -0,0 +1,17 @@ +sudo: false + +language: go + +before_script: + - go get -u golang.org/x/lint/golint + +go: + - 1.10.x + - master + +script: + - test -z "$(gofmt -s -l . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... + - go build -v ./... + - go test -v ./... 
diff --git a/vendor/github.com/Azure/go-ntlmssp/BUILD b/vendor/github.com/Azure/go-ntlmssp/BUILD new file mode 100644 index 00000000..3bd3306b --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go-ntlmssp", + srcs = [ + "authenticate_message.go", + "authheader.go", + "avids.go", + "challenge_message.go", + "messageheader.go", + "negotiate_flags.go", + "negotiate_message.go", + "negotiator.go", + "nlmp.go", + "unicode.go", + "varfield.go", + "version.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/Azure/go-ntlmssp", + importpath = "github.com/Azure/go-ntlmssp", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/crypto/md4"], +) diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE new file mode 100644 index 00000000..dc1cf39d --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md new file mode 100644 index 00000000..55cdcefa --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/README.md @@ -0,0 +1,29 @@ +# go-ntlmssp +Golang package that provides NTLM/Negotiate authentication over HTTP + +[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp) + +Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx +Implementation hints from http://davenport.sourceforge.net/ntlm.html + +This package only implements authentication, no key exchange or encryption. It +only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +This package implements NTLMv2. + +# Usage + +``` +url, user, password := "http://www.example.com/secrets", "robpike", "pw123" +client := &http.Client{ + Transport: ntlmssp.Negotiator{ + RoundTripper:&http.Transport{}, + }, +} + +req, _ := http.NewRequest("GET", url, nil) +req.SetBasicAuth(user, password) +res, _ := client.Do(req) +``` + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ntlmssp/SECURITY.md b/vendor/github.com/Azure/go-ntlmssp/SECURITY.md new file mode 100644 index 00000000..e138ec5d --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go new file mode 100644 index 00000000..ab183db6 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go @@ -0,0 +1,187 @@ +package ntlmssp + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "encoding/hex" + "errors" + "strings" + "time" +) + +type authenicateMessage struct { + LmChallengeResponse []byte + NtChallengeResponse []byte + + TargetName string + UserName string + + // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH + EncryptedRandomSessionKey []byte + + NegotiateFlags negotiateFlags + + MIC []byte +} + +type authenticateMessageFields struct { + messageHeader + LmChallengeResponse varField + NtChallengeResponse varField + TargetName varField + UserName varField + Workstation varField + _ [8]byte + NegotiateFlags negotiateFlags +} + +func (m authenicateMessage) MarshalBinary() ([]byte, error) { + if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) { + return nil, errors.New("Only unicode is supported") + } + + target, user := toUnicode(m.TargetName), toUnicode(m.UserName) + workstation := toUnicode("") + + ptr := binary.Size(&authenticateMessageFields{}) + f := authenticateMessageFields{ + messageHeader: newMessageHeader(3), + NegotiateFlags: m.NegotiateFlags, + LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)), + NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)), + TargetName: newVarField(&ptr, len(target)), + UserName: newVarField(&ptr, len(user)), + Workstation: newVarField(&ptr, len(workstation)), + } + + f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION) + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &f); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &target); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &user); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message +//that was received from the server +func ProcessChallenge(challengeMessageData []byte, user, password string, domainNeeded bool) ([]byte, error) { + if user == "" && password == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + if !domainNeeded { + cm.TargetName = "" + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & 
windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} + +func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) { + if user == "" && hash == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + hashParts := strings.Split(hash, ":") + if len(hashParts) > 1 { + hash = hashParts[1] + } + hashBytes, err := hex.DecodeString(hash) + if err != nil { + return nil, err + } + ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName)) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + return am.MarshalBinary() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go new file mode 100644 index 00000000..c9d30d32 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authheader.go @@ -0,0 +1,66 @@ +package ntlmssp + +import ( + "encoding/base64" + "strings" +) + +type authheader []string + +func (h authheader) IsBasic() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return true + } + } + return false +} + +func (h authheader) Basic() string { + for _, s := range h { + if strings.HasPrefix(string(s), "Basic ") { + return s + } + } + return "" +} + +func (h authheader) IsNegotiate() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "Negotiate") { + return true + } + } + return false +} + +func (h authheader) IsNTLM() bool { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") { + return true + } + } + return false +} + +func (h authheader) GetData() ([]byte, error) { + for _, s := range h { + if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") { + p := strings.Split(string(s), " ") + if len(p) < 2 { + return nil, nil + } + return base64.StdEncoding.DecodeString(string(p[1])) + } + } + return 
nil, nil +} + +func (h authheader) GetBasicCreds() (username, password string, err error) { + d, err := h.GetData() + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(d), ":", 2) + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go new file mode 100644 index 00000000..196b5f13 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/avids.go @@ -0,0 +1,17 @@ +package ntlmssp + +type avID uint16 + +const ( + avIDMsvAvEOL avID = iota + avIDMsvAvNbComputerName + avIDMsvAvNbDomainName + avIDMsvAvDNSComputerName + avIDMsvAvDNSDomainName + avIDMsvAvDNSTreeName + avIDMsvAvFlags + avIDMsvAvTimestamp + avIDMsvAvSingleHost + avIDMsvAvTargetName + avIDMsvChannelBindings +) diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go new file mode 100644 index 00000000..053b55e4 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go @@ -0,0 +1,82 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +type challengeMessageFields struct { + messageHeader + TargetName varField + NegotiateFlags negotiateFlags + ServerChallenge [8]byte + _ [8]byte + TargetInfo varField +} + +func (m challengeMessageFields) IsValid() bool { + return m.messageHeader.IsValid() && m.MessageType == 2 +} + +type challengeMessage struct { + challengeMessageFields + TargetName string + TargetInfo map[avID][]byte + TargetInfoRaw []byte +} + +func (m *challengeMessage) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields) + if err != nil { + return err + } + if !m.challengeMessageFields.IsValid() { + return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader) + } + + if m.challengeMessageFields.TargetName.Len > 0 { + m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE)) + if err != nil { + return err + } + } + + if m.challengeMessageFields.TargetInfo.Len > 0 { + d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data) + m.TargetInfoRaw = d + if err != nil { + return err + } + m.TargetInfo = make(map[avID][]byte) + r := bytes.NewReader(d) + for { + var id avID + var l uint16 + err = binary.Read(r, binary.LittleEndian, &id) + if err != nil { + return err + } + if id == avIDMsvAvEOL { + break + } + + err = binary.Read(r, binary.LittleEndian, &l) + if err != nil { + return err + } + value := make([]byte, l) + n, err := r.Read(value) + if err != nil { + return err + } + if n != int(l) { + return fmt.Errorf("Expected to read %d bytes, got only %d", l, n) + } + m.TargetInfo[id] = value + } + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go b/vendor/github.com/Azure/go-ntlmssp/messageheader.go new file mode 100644 index 00000000..247e2846 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/messageheader.go @@ -0,0 +1,21 @@ +package ntlmssp + +import ( + "bytes" +) + +var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0} + +type messageHeader struct { + Signature [8]byte + MessageType uint32 +} + +func (h messageHeader) IsValid() bool { + return bytes.Equal(h.Signature[:], signature[:]) && + h.MessageType > 0 && h.MessageType < 4 +} + +func newMessageHeader(messageType uint32) messageHeader { + return messageHeader{signature, messageType} +} diff --git 
a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go new file mode 100644 index 00000000..5905c023 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go @@ -0,0 +1,52 @@ +package ntlmssp + +type negotiateFlags uint32 + +const ( + /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0 + /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 + /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 + + /*D*/ + negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 + /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 + /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 + /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 + + /*H*/ + negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 + + /*J*/ + negotiateFlagANONYMOUS = 1 << 11 + /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 + /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 + + /*M*/ + negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 + /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 + /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 + + /*P*/ + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 + /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 + + /*R*/ + negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 + /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 + + /*T*/ + negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 + + /*U*/ + negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 + /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 + /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 +) + +func (field negotiateFlags) Has(flags negotiateFlags) bool { + return field&flags == flags +} + +func (field *negotiateFlags) Unset(flags negotiateFlags) { + *field = *field ^ (*field & flags) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go new file mode 100644 index 00000000..e466a986 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go @@ -0,0 +1,64 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "strings" +) + +const expMsgBodyLen = 40 + +type negotiateMessageFields struct { + messageHeader + NegotiateFlags negotiateFlags + + Domain varField + Workstation varField + + Version +} + +var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO | + negotiateFlagNTLMSSPNEGOTIATE56 | + negotiateFlagNTLMSSPNEGOTIATE128 | + negotiateFlagNTLMSSPNEGOTIATEUNICODE | + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY + +//NewNegotiateMessage creates a new NEGOTIATE message with the +//flags that this package supports. 
+func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) {
+	payloadOffset := expMsgBodyLen
+	flags := defaultFlags
+
+	if domainName != "" {
+		flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED
+	}
+
+	if workstationName != "" {
+		flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED
+	}
+
+	msg := negotiateMessageFields{
+		messageHeader:  newMessageHeader(1),
+		NegotiateFlags: flags,
+		Domain:         newVarField(&payloadOffset, len(domainName)),
+		Workstation:    newVarField(&payloadOffset, len(workstationName)),
+		Version:        DefaultVersion(),
+	}
+
+	b := bytes.Buffer{}
+	if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil {
+		return nil, err
+	}
+	if b.Len() != expMsgBodyLen {
+		return nil, errors.New("incorrect body length")
+	}
+
+	payload := strings.ToUpper(domainName + workstationName)
+	if _, err := b.WriteString(payload); err != nil {
+		return nil, err
+	}
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
new file mode 100644
index 00000000..cce4955d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
@@ -0,0 +1,153 @@
+package ntlmssp
+
+import (
+	"bytes"
+	"encoding/base64"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// GetDomain : parse the domain name based on slashes in the input
+// Need to check for upn as well
+func GetDomain(user string) (string, string, bool) {
+	domain := ""
+	domainNeeded := false
+
+	if strings.Contains(user, "\\") {
+		ucomponents := strings.SplitN(user, "\\", 2)
+		domain = ucomponents[0]
+		user = ucomponents[1]
+		domainNeeded = true
+	} else if strings.Contains(user, "@") {
+		domainNeeded = false
+	} else {
+		domainNeeded = true
+	}
+	return user, domain, domainNeeded
+}
+
+//Negotiator is an http.RoundTripper decorator that automatically
+//converts basic authentication to NTLM/Negotiate authentication when appropriate.
+type Negotiator struct{ http.RoundTripper }
+
+//RoundTrip sends the request to the server, handling any authentication
+//re-sends as needed.
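+//The request body is buffered up front so it can be replayed across the multiple legs of the NTLM/Negotiate handshake.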
+func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) { + // Use default round tripper if not provided + rt := l.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + // If it is not basic auth, just round trip the request as usual + reqauth := authheader(req.Header.Values("Authorization")) + if !reqauth.IsBasic() { + return rt.RoundTrip(req) + } + reqauthBasic := reqauth.Basic() + // Save request body + body := bytes.Buffer{} + if req.Body != nil { + _, err = body.ReadFrom(req.Body) + if err != nil { + return nil, err + } + + req.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + } + // first try anonymous, in case the server still finds us + // authenticated from previous traffic + req.Header.Del("Authorization") + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth := authheader(res.Header.Values("Www-Authenticate")) + if !resauth.IsNegotiate() && !resauth.IsNTLM() { + // Unauthorized, Negotiate not requested, let's try with basic auth + req.Header.Set("Authorization", string(reqauthBasic)) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth = authheader(res.Header.Values("Www-Authenticate")) + } + + if resauth.IsNegotiate() || resauth.IsNTLM() { + // 401 with request:Basic and response:Negotiate + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // recycle credentials + u, p, err := reqauth.GetBasicCreds() + if err != nil { + return nil, err + } + + // get domain from username + domain := "" + u, domain, domainNeeded := GetDomain(u) + + // send negotiate + negotiateMessage, err := NewNegotiateMessage(domain, "") + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + + // receive challenge? 
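+		// the server is expected to answer the NEGOTIATE message with a CHALLENGE in the Www-Authenticate header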
+ resauth = authheader(res.Header.Values("Www-Authenticate")) + challengeMessage, err := resauth.GetData() + if err != nil { + return nil, err + } + if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 { + // Negotiation failed, let client deal with response + return res, nil + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // send authenticate + authenticateMessage, err := ProcessChallenge(challengeMessage, u, p, domainNeeded) + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + return rt.RoundTrip(req) + } + + return res, err +} diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go new file mode 100644 index 00000000..1e65abe8 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/nlmp.go @@ -0,0 +1,51 @@ +// Package ntlmssp provides NTLM/Negotiate authentication over HTTP +// +// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx, +// implementation hints from http://davenport.sourceforge.net/ntlm.html . +// This package only implements authentication, no key exchange or encryption. It +// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +// This package implements NTLMv2. +package ntlmssp + +import ( + "crypto/hmac" + "crypto/md5" + "golang.org/x/crypto/md4" + "strings" +) + +func getNtlmV2Hash(password, username, target string) []byte { + return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target)) +} + +func getNtlmHash(password string) []byte { + hash := md4.New() + hash.Write(toUnicode(password)) + return hash.Sum(nil) +} + +func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge, + timestamp, targetInfo []byte) []byte { + + temp := []byte{1, 1, 0, 0, 0, 0, 0, 0} + temp = append(temp, timestamp...) + temp = append(temp, clientChallenge...) + temp = append(temp, 0, 0, 0, 0) + temp = append(temp, targetInfo...) + temp = append(temp, 0, 0, 0, 0) + + NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp) + return append(NTProofStr, temp...) +} + +func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte { + return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...) 
+} + +func hmacMd5(key []byte, data ...[]byte) []byte { + mac := hmac.New(md5.New, key) + for _, d := range data { + mac.Write(d) + } + return mac.Sum(nil) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go new file mode 100644 index 00000000..7b4f4716 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/unicode.go @@ -0,0 +1,29 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "unicode/utf16" +) + +// helper func's for dealing with Windows Unicode (UTF16LE) + +func fromUnicode(d []byte) (string, error) { + if len(d)%2 > 0 { + return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length") + } + s := make([]uint16, len(d)/2) + err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s) + if err != nil { + return "", err + } + return string(utf16.Decode(s)), nil +} + +func toUnicode(s string) []byte { + uints := utf16.Encode([]rune(s)) + b := bytes.Buffer{} + binary.Write(&b, binary.LittleEndian, &uints) + return b.Bytes() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go new file mode 100644 index 00000000..15f9aa11 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/varfield.go @@ -0,0 +1,40 @@ +package ntlmssp + +import ( + "errors" +) + +type varField struct { + Len uint16 + MaxLen uint16 + BufferOffset uint32 +} + +func (f varField) ReadFrom(buffer []byte) ([]byte, error) { + if len(buffer) < int(f.BufferOffset+uint32(f.Len)) { + return nil, errors.New("Error reading data, varField extends beyond buffer") + } + return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil +} + +func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) { + d, err := f.ReadFrom(buffer) + if err != nil { + return "", err + } + if unicode { // UTF-16LE encoding scheme + return fromUnicode(d) + } + // OEM encoding, close enough to ASCII, since no code page is specified + return string(d), err +} + +func newVarField(ptr *int, fieldsize int) varField { + f := varField{ + Len: uint16(fieldsize), + MaxLen: uint16(fieldsize), + BufferOffset: uint32(*ptr), + } + *ptr += fieldsize + return f +} diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go new file mode 100644 index 00000000..6d848921 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/version.go @@ -0,0 +1,20 @@ +package ntlmssp + +// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx +type Version struct { + ProductMajorVersion uint8 + ProductMinorVersion uint8 + ProductBuild uint16 + _ [3]byte + NTLMRevisionCurrent uint8 +} + +// DefaultVersion returns a Version with "sensible" defaults (Windows 7) +func DefaultVersion() Version { + return Version{ + ProductMajorVersion: 6, + ProductMinorVersion: 1, + ProductBuild: 7601, + NTLMRevisionCurrent: 15, + } +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 00000000..4025e01e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always 
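The Negotiator above is meant to be installed as an http.Client transport: the caller attaches ordinary Basic credentials to the request, and RoundTrip upgrades them to an NTLM/Negotiate handshake when the server answers 401. A minimal sketch of that wiring (the host and credentials below are placeholders, not part of this patch):

    package main

    import (
        "net/http"

        "github.com/Azure/go-ntlmssp"
    )

    func main() {
        client := &http.Client{
            Transport: ntlmssp.Negotiator{
                // inner transport used for the actual round trips
                RoundTripper: &http.Transport{},
            },
        }

        req, err := http.NewRequest("GET", "http://server.example/protected", nil)
        if err != nil {
            panic(err)
        }
        // Plain Basic credentials; on a 401 the Negotiator recycles them
        // for the NTLM negotiate/challenge/authenticate exchange.
        req.SetBasicAuth("DOMAIN\\user", "password")

        res, err := client.Do(req)
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()
    }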
diff --git a/vendor/github.com/Masterminds/goutils/BUILD b/vendor/github.com/Masterminds/goutils/BUILD new file mode 100644 index 00000000..583ec61e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "goutils", + srcs = [ + "cryptorandomstringutils.go", + "randomstringutils.go", + "stringutils.go", + "wordutils.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/Masterminds/goutils", + importpath = "github.com/Masterminds/goutils", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 00000000..d700ec47 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 00000000..163ffe72 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes:
+* WordUtils
+* RandomStringUtils
+* StringUtils (partial implementation)
+
+## Installation
+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
+
+    go get github.com/Masterminds/goutils
+
+If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
+
+
+## Documentation
+GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils)
+
+
+## Usage
+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 1: A goutils function which returns no errors
+        fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+    }
+Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/Masterminds/goutils"
+    )
+
+    func main() {
+
+        // EXAMPLE 2: A goutils function which returns an error
+        rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+        if err1 != nil {
+            fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+        } else {
+            fmt.Println(rand1)
+        }
+
+    }
+
+## License
+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
+
+## Issue Reporting
+Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues
+
+## Website
+* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
new file mode 100644
index 00000000..657564a8
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml
@@ -0,0 +1,21 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\goutils
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+build: off
+
+install:
+  - go version
+  - go env
+
+test_script:
+  - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
new file mode 100644
index 00000000..8dbd9248
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"unicode"
+)
+
+/*
+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNonAlphaNumeric(count int) (string, error) {
+	return CryptoRandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+CryptoRandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAscii(count int) (string, error) {
+	return CryptoRandom(count, 32, 127, false, false)
+}
+
+/*
+CryptoRandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, false, true)
+}
+
+/*
+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphabetic(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, false)
+}
+
+/*
+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+	count - the length of random string to create
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumeric(count int) (string, error) {
+	return CryptoRandom(count, 0, 0, true, true)
+}
+
+/*
+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+	count - the length of random string to create
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return CryptoRandom(count, 0, 0, letters, numbers)
+}
+
+/*
+CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness.
+If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable
+characters will be used, unless letters and numbers are both false, in which case start and end are set to
+0 and math.MaxInt32.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode int) to start at
+	end - the position in set of chars (ASCII/Unicode int) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randomly from. If nil, then it will use the set of all chars.
+
+Returns:
+	string - the random string
+	error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+	if count == 0 {
+		return "", nil
+	} else if count < 0 {
+		err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+		return "", err
+	}
+	if chars != nil && len(chars) == 0 {
+		err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+		return "", err
+	}
+
+	if start == 0 && end == 0 {
+		if chars != nil {
+			end = len(chars)
+		} else {
+			if !letters && !numbers {
+				end = math.MaxInt32
+			} else {
+				end = 'z' + 1
+				start = ' '
+			}
+		}
+	} else {
+		if end <= start {
+			err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+			return "", err
+		}
+
+		if chars != nil && end > len(chars) {
+			err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+			return "", err
+		}
+	}
+
+	buffer := make([]rune, count)
+	gap := end - start
+
+	// high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+	// low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+	for count != 0 {
+		count--
+		var ch rune
+		if chars == nil {
+			ch = rune(getCryptoRandomInt(gap) + int64(start))
+		} else {
+			ch = chars[getCryptoRandomInt(gap)+int64(start)]
+		}
+
+		if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+			if ch >= 56320 && ch <= 57343 { // low surrogate range
+				if count == 0 {
+					count++
+				} else {
+					// Insert low surrogate
+					buffer[count] = ch
+					count--
+					// Insert high surrogate
+					buffer[count] = rune(55296 + getCryptoRandomInt(128))
+				}
+			} else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+				if count == 0 {
+					count++
+				} else {
+					// Insert low surrogate
+					buffer[count] = rune(56320 + getCryptoRandomInt(128))
+					count--
+					// Insert high surrogate
+					buffer[count] = ch
+				}
+			} else if ch >= 56192 && ch <= 56319 {
+				// private high surrogate, skip it
+				count++
+			} else {
+				// not one of the surrogates*
+				buffer[count] = ch
+			}
+		} else {
+			count++
+		}
+	}
+	return string(buffer), nil
+}
+
+func getCryptoRandomInt(count int) int64 {
+	nBig, err :=
rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 00000000..27267023 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameters: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
+*/
+func RandomAlphaNumeric(count int) (string, error) {
+	return Random(count, 0, 0, true, true)
+}
+
+/*
+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+	count - the length of random string to create
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+	return Random(count, 0, 0, letters, numbers)
+}
+
+/*
+Random creates a random string based on a variety of options, using the default source of randomness.
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but
+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode int) to start at
+	end - the position in set of chars (ASCII/Unicode int) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randomly from. If nil, then it will use the set of all chars.
+
+Returns:
+	string - the random string
+	error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+	return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
+}
+
+/*
+RandomSeed creates a random string based on a variety of options, using a supplied source of randomness.
+If the parameters start and end are both 0, they are set to ' ' and 'z' respectively, so the ASCII printable
+characters will be used, unless letters and numbers are both false, in which case start and end are set to
+0 and math.MaxInt32.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
+
+Parameters:
+	count - the length of random string to create
+	start - the position in set of chars (ASCII/Unicode decimals) to start at
+	end - the position in set of chars (ASCII/Unicode decimals) to end before
+	letters - if true, generated string may include alphabetic characters
+	numbers - if true, generated string may include numeric characters
+	chars - the set of chars to choose randomly from. If nil, then it will use the set of all chars.
+	random - a source of randomness.
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 00000000..741bb530 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
+ +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 00000000..034cad8e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. 
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
+The delimiters represent a set of characters understood to separate words. The first string character
+and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
+
+Returns:
+	capitalized string
+*/
+func Capitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	capitalizeNext := true
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			capitalizeNext = true
+		} else if capitalizeNext {
+			buffer[i] = unicode.ToTitle(ch)
+			capitalizeNext = false
+		}
+	}
+	return string(buffer)
+
+}
+
+/*
+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is, each word is made up of a
+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+	str - the string to capitalize fully
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
+
+Returns:
+	capitalized string
+*/
+func CapitalizeFully(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+	str = strings.ToLower(str)
+	return Capitalize(str, delimiters...)
+}
+
+/*
+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to uncapitalize
+	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be the delimiter
+
+Returns:
+	uncapitalized string
+*/
+func Uncapitalize(str string, delimiters ...rune) string {
+
+	var delimLen int
+
+	if delimiters == nil {
+		delimLen = -1
+	} else {
+		delimLen = len(delimiters)
+	}
+
+	if str == "" || delimLen == 0 {
+		return str
+	}
+
+	buffer := []rune(str)
+	uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if isDelimiter(ch, delimiters...) {
+			uncapitalizeNext = true
+		} else if uncapitalizeNext {
+			buffer[i] = unicode.ToLower(ch)
+			uncapitalizeNext = false
+		}
+	}
+	return string(buffer)
+}
+
+/*
+SwapCase swaps the case of a string using a word-based algorithm.
+
+Conversion algorithm:
+
+	Upper case character converts to Lower case
+	Title case character converts to Lower case
+	Lower case character after Whitespace or at start converts to Title case
+	Other Lower case character converts to Upper case
+	Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+	str - the string to swap case
+
+Returns:
+	the changed string
+*/
+func SwapCase(str string) string {
+	if str == "" {
+		return str
+	}
+	buffer := []rune(str)
+
+	whitespace := true
+
+	for i := 0; i < len(buffer); i++ {
+		ch := buffer[i]
+		if unicode.IsUpper(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsTitle(ch) {
+			buffer[i] = unicode.ToLower(ch)
+			whitespace = false
+		} else if unicode.IsLower(ch) {
+			if whitespace {
+				buffer[i] = unicode.ToTitle(ch)
+				whitespace = false
+			} else {
+				buffer[i] = unicode.ToUpper(ch)
+			}
+		} else {
+			whitespace = unicode.IsSpace(ch)
+		}
+	}
+	return string(buffer)
+}
+
+/*
+Initials extracts the initial letters from each word in the string. The first letter of the string and all first
+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
+parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
+
+Parameters:
+	str - the string to get initials from
+	delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be the delimiter
+Returns:
+	string of initial letters
+*/
+func Initials(str string, delimiters ...rune) string {
+	if str == "" {
+		return str
+	}
+	if delimiters != nil && len(delimiters) == 0 {
+		return ""
+	}
+	strLen := len(str)
+	var buf bytes.Buffer
+	lastWasGap := true
+	for i := 0; i < strLen; i++ {
+		ch := rune(str[i])
+
+		if isDelimiter(ch, delimiters...) {
+			lastWasGap = true
+		} else if lastWasGap {
+			buf.WriteRune(ch)
+			lastWasGap = false
+		}
+	}
+	return buf.String()
+}
+
+// private function (lower case func name)
+func isDelimiter(ch rune, delimiters ...rune) bool {
+	if delimiters == nil {
+		return unicode.IsSpace(ch)
+	}
+	for _, delimiter := range delimiters {
+		if ch == delimiter {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml
new file mode 100644
index 00000000..096369d4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - tip
+
+# Setting sudo access to false will let Travis CI use containers rather than
+# VMs to run the tests.
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/BUILD b/vendor/github.com/Masterminds/semver/BUILD new file mode 100644 index 00000000..e2b1d994 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "semver", + srcs = [ + "collection.go", + "constraints.go", + "doc.go", + "version.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/Masterminds/semver", + importpath = "github.com/Masterminds/semver", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 00000000..e405c9a8 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. 
+ +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 00000000..9ff7da9c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 00000000..a7a1b4e3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... 
|| :
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
new file mode 100644
index 00000000..1b52d2f4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -0,0 +1,194 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+```go
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+```
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
+package from the standard library. For example,
+
+```go
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            t.Errorf("Error parsing version: %s", err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+    // Check if the version meets the constraints. The a variable will be true.
+    a := c.Check(v)
+```
+
+## Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then joined by `||` (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
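+
+As an illustrative, self-contained sketch (this program is not part of the
+upstream docs; the expected results follow from the rules described in this
+README), the compound constraint above can be exercised like this:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/Masterminds/semver"
+)
+
+func main() {
+    // Two ANDed comparisons, ORed with a third.
+    c, err := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+    if err != nil {
+        panic(err)
+    }
+
+    for _, s := range []string{"1.5.0", "3.1.0", "4.2.3"} {
+        v, err := semver.NewVersion(s)
+        if err != nil {
+            panic(err)
+        }
+        // Prints: 1.5.0 true, 3.1.0 false, 4.2.3 true
+        fmt.Println(s, c.Check(v))
+    }
+}
+```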
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+## Working With Pre-release Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons without a pre-release comparator will skip pre-release versions.
+For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
+while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order (see an
+[ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint.
When validation fails, a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+
+    // Validate a version against a constraint.
+    a, msgs := c.Validate(v)
+    // a is false
+    for _, m := range msgs {
+        fmt.Println(m)
+
+        // Loops over the errors which would read
+        // "1.3 is greater than 1.2.3"
+        // "1.3 is less than 1.4"
+    }
+```
+
+# Fuzzing
+
+[dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
+
+1. `go-fuzz-build`
+2. `go-fuzz -workdir=fuzz`
+
+# Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml
new file mode 100644
index 00000000..b2778df1
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/appveyor.yml
@@ -0,0 +1,44 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\semver
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+install:
+  - go version
+  - go env
+  - go get -u gopkg.in/alecthomas/gometalinter.v1
+  - set PATH=%PATH%;%GOPATH%\bin
+  - gometalinter.v1.exe --install
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - "gometalinter.v1 \
+      --disable-all \
+      --enable deadcode \
+      --severity deadcode:error \
+      --enable gofmt \
+      --enable gosimple \
+      --enable ineffassign \
+      --enable misspell \
+      --enable vet \
+      --tests \
+      --vendor \
+      --deadline 60s \
+      ./... || exit_code=1"
+  - "gometalinter.v1 \
+      --disable-all \
+      --enable golint \
+      --vendor \
+      --deadline 60s \
+      ./... || :"
+  - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go
new file mode 100644
index 00000000..a7823589
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+	return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
new file mode 100644
index 00000000..b94b9341
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -0,0 +1,423 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the 
restraint. It performs the logic for + // the constraint. + function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 00000000..6a6c24c6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. 
+These look like:
+
+  * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+  * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  * `>= 1.2.x` is equivalent to `>= 1.2.0`
+  * `<= 2.x` is equivalent to `< 3`
+  * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+  * `~1` is equivalent to `>= 1, < 2`
+  * `~2.3` is equivalent to `>= 2.3, < 2.4`
+  * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+  * `~1.x` is equivalent to `>= 1, < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+  * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  * `^2.3` is equivalent to `>= 2.3, < 3`
+  * `^2.x` is equivalent to `>= 2.0.0, < 3`
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 00000000..6b061e61
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 00000000..c87d1c4b
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,30 @@
+run:
+  deadline: 2m
+
+linters:
+  disable-all: true
+  enable:
+    - misspell
+    - structcheck
+    - govet
+    - staticcheck
+    - deadcode
+    - errcheck
+    - varcheck
+    - unparam
+    - ineffassign
+    - nakedret
+    - gocyclo
+    - dupl
+    - goimports
+    - revive
+    - gosec
+    - gosimple
+    - typecheck
+    - unused
+
+linters-settings:
+  gofmt:
+    simplify: true
+  dupl:
+    threshold: 600
diff --git a/vendor/github.com/Masterminds/semver/v3/BUILD b/vendor/github.com/Masterminds/semver/v3/BUILD
new file mode 100644
index 00000000..b745807e
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/BUILD
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "semver",
+    srcs = [
+        "collection.go",
+        "constraints.go",
+        "doc.go",
+        "version.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/Masterminds/semver/v3",
+    importpath = "github.com/Masterminds/semver/v3",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 00000000..f1262642
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,214 @@
+# Changelog
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- 
#186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. 
+ +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. 
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 00000000..9ff7da9c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000..eac19178 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 00000000..d8f54dcb --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+  compatibility for range handling in other tools from other languages. It has
+  a similar API to the v1 releases. The development of this version is on the master
+  branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+  no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+  There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+  previous stable and is still maintained for bug fixes. The development, to fix
+  bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+    v, err := semver.NewVersion(r)
+    if err != nil {
+        t.Errorf("Error parsing version: %s", err)
+    }
+
+    vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`.
There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+   and others it will follow the specification and always include prereleases
+   within the comparison. It will provide an answer that is valid with the
+   comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+   different set of rules that are common for ranges with tools like npm/js
+   and Rust/Cargo. This includes considering prereleases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+   simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+   ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+    // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of prereleases include
+development, alpha, beta, and release candidate releases. A prerelease may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, prereleases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification prereleases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons using constraints without a prerelease comparator will skip
+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
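+
+As a brief, illustrative sketch of that behavior (this snippet is not from the
+upstream README; the printed results follow from the rules above):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/Masterminds/semver/v3"
+)
+
+func main() {
+    release, _ := semver.NewConstraint(">=1.2.3")   // no prerelease comparator
+    withPre, _ := semver.NewConstraint(">=1.2.3-0") // opts in to prereleases
+
+    v, _ := semver.NewVersion("1.2.4-beta.1")
+
+    fmt.Println(release.Check(v)) // false: prereleases are skipped
+    fmt.Println(withPre.Check(v)) // true: 1.2.4-beta.1 >= 1.2.3-0
+}
+```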
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+    // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+    // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+    fmt.Println(m)
+
+    // Loops over the errors which would read
+    // "1.3 is greater than 1.2.3"
+    // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
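+
+As one more illustrative sketch (not part of the upstream README), the
+pre-1.0.0 caret rules listed above can be verified like this:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/Masterminds/semver/v3"
+)
+
+func main() {
+    // ^0.2.3 is treated as >=0.2.3 <0.3.0: before 1.0.0 the minor
+    // version acts as the stability level.
+    c, err := semver.NewConstraint("^0.2.3")
+    if err != nil {
+        panic(err)
+    }
+
+    for _, s := range []string{"0.2.4", "0.3.0"} {
+        v, err := semver.NewVersion(s)
+        if err != nil {
+            panic(err)
+        }
+        fmt.Println(s, c.Check(v)) // 0.2.4 true, 0.3.0 false
+    }
+}
+```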
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 00000000..a7823589 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 00000000..203072e4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,594 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. 
+ if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. 
See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
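+ // For example, per the table above: ^1.2.3 accepts 1.4.0 (same major) but
+ // rejects 2.0.0, and ^0.2.3 rejects 0.3.0 because the minor version acts
+ // as the stability boundary while the major version is 0.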
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 00000000..74f97caa --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix + +# Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. 
For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details, please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others, it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer that is valid with the
+    comparison section of the spec at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation, it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases, a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules, including the shorthand use of
+    ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns, while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parsable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parsable.
+ }
+ // Check if the version meets the constraints. The variable a will be true.
+ a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+ - `=`: equal (aliased to no operator)
+ - `!=`: not equal
+ - `>`: greater than
+ - `<`: less than
+ - `>=`: greater than or equal to
+ - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges; the first is hyphen ranges.
+These look like:
+
+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator, it falls
+back to the tilde operation. For example,
+
+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `>= 1.2.x` is equivalent to `>= 1.2.0`
+ - `<= 2.x` is equivalent to `< 3`
+ - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+ - `~1` is equivalent to `>= 1, < 2`
+ - `~2.3` is equivalent to `>= 2.3 < 2.4`
+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release, the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ - `^2.3` is equivalent to `>= 2.3, < 3`
+ - `^2.x` is equivalent to `>= 2.0.0, < 3`
+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+ - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors is returned
+explaining why the version didn't meet the constraint. For example,
+
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loop over the errors, which would read:
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
new file mode 100644
index 00000000..a242ad70
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+ d := string(data)
+
+ // Test NewVersion
+ _, _ = NewVersion(d)
+
+ // Test StrictNewVersion
+ _, _ = StrictNewVersion(d)
+
+ // Test NewConstraint
+ _, _ = NewConstraint(d)
+
+ // The return value should be 0 normally, 1 if the priority in future tests
+ // should be increased, and -1 if future tests should skip passing in that
+ // data. We do not have a reason to change priority, so 0 is always returned.
+ // There are example tests that do this. + return 0 +} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 00000000..7c4bed33 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,639 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. 
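+ // For example, "1.01.1" is rejected here for the leading zero in "01",
+ // and "1.2.3.4" is rejected because its third segment, "3.4", contains a
+ // non-digit character (the extra dot).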
+ for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. 
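+// For example, a version parsed from "v1.2" reports "v1.2" here even
+// though String() normalizes it to "1.2.0".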
+func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. 
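+// Prereleases are included in the comparison, so 1.2.3 is greater than
+// 1.2.3-beta.1.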
+func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. 
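+ // For example, comparing "alpha.1" with "alpha.beta": part 0 is equal
+ // ("alpha" == "alpha") and in part 1 the numeric "1" sorts below the
+ // alphanumeric "beta", so "alpha.1" is the lower prerelease.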
+ for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 00000000..400d4f93 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,425 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. 
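+// Note: this file is the legacy v1 package (github.com/Masterminds/semver),
+// vendored alongside the v3 package above; the two implementations differ
+// (e.g., v1 uses int64 version segments, v3 uses uint64).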
+var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. 
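+// For "1.2.3+b345" this returns "b345"; an empty string means no build
+// metadata was present.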
+func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. 
Prerelease is +// lower than the version without a prerelease. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. 
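+ // Note: n1 and n2 below hold the ParseUint errors; a nil error therefore
+ // means "that part is numeric", which drives the branching that follows.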
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 00000000..b42bcd62 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 00000000..5e3002f8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/BUILD b/vendor/github.com/Masterminds/sprig/v3/BUILD new file mode 100644 index 00000000..d2c10b19 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sprig", + srcs = [ + "crypto.go", + "date.go", + "defaults.go", + "dict.go", + "doc.go", + "functions.go", + "list.go", + "network.go", + "numeric.go", + "reflect.go", + "regex.go", + "semver.go", + "strings.go", + "url.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/Masterminds/sprig/v3", + importpath = "github.com/Masterminds/sprig/v3", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Masterminds/goutils", + "//vendor/github.com/Masterminds/semver/v3:semver", + "//vendor/github.com/google/uuid", + "//vendor/github.com/huandu/xstrings", + "//vendor/github.com/imdario/mergo", + "//vendor/github.com/mitchellh/copystructure", + "//vendor/github.com/shopspring/decimal", + "//vendor/github.com/spf13/cast", + "//vendor/golang.org/x/crypto/bcrypt", + "//vendor/golang.org/x/crypto/scrypt", + ], +) diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 00000000..2ce45dd4 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 00000000..f311b1ea --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 00000000..78d409cd --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
new file mode 100644
index 00000000..3e22c60e
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/README.md
@@ -0,0 +1,100 @@
+# Sprig: Template functions for Go templates
+
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but not
+very many template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## IMPORTANT NOTES
+
+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. The
+v0.3.9 release of mergo changed behavior in a way that breaks sprig's merge
+template functions, so use v0.3.10 or later of that package; v0.3.9 will cause
+the sprig tests to fail.
+
+## Package Versions
+
+There are two active major versions of the `sprig` package.
+
+* v3 is the currently stable release series on the `master` branch. Its Go API
+  should remain compatible with v2; behavioral changes in some functions are
+  the reason for the new major version.
+* v2 is the previous stable release series. It has been more than three years since
+  the initial release of v2. You can read the documentation and see the code
+  on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
+  Bug fixes to this major version will continue for some time.
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+import (
+	"html/template"
+
+	"github.com/Masterminds/sprig/v3"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+	template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout.
The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 00000000..13a5cd55 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + 
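+	// (each letter in these template strings is a character-class code; see templateCharacters below)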
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go new file mode 100644 index 00000000..ed022dda --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := 
duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 00000000..b9f97966 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. 
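+// For example, any 0 "" "x" is true (because "x" is non-empty), while any 0 "" is false.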
+func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
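+// For example, in a template: {{ ternary "yes" "no" (eq 1 1) }} renders "yes".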
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 00000000..ade88969 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 00000000..aabb9d44 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 00000000..57fcec1d --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
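+	// (the 2.5.0 changelog above marks trimall and abbrevboth as deprecated aliases)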
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 00000000..ca0fbb78 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go new file mode 100644 index 00000000..108d78a9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 00000000..f68e4182 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts 
integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathmetical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 00000000..8a65c132 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
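+// For example, typeIs "int" 42 is true; typeIsLike also matches through one
+// pointer level, so typeIsLike "int" is true for an *int value as well.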
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 00000000..fab55101 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 00000000..3fbe08aa --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 00000000..e0ae628c --- /dev/null +++ 
b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+}
+
+func indent(spaces int, v string) string {
+	pad := strings.Repeat(" ", spaces)
+	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+	return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+	if count == 1 {
+		return one
+	}
+	return many
+}
+
+func strslice(v interface{}) []string {
+	switch v := v.(type) {
+	case []string:
+		return v
+	case []interface{}:
+		b := make([]string, 0, len(v))
+		for _, s := range v {
+			if s != nil {
+				b = append(b, strval(s))
+			}
+		}
+		return b
+	default:
+		val := reflect.ValueOf(v)
+		switch val.Kind() {
+		case reflect.Array, reflect.Slice:
+			l := val.Len()
+			b := make([]string, 0, l)
+			for i := 0; i < l; i++ {
+				value := val.Index(i).Interface()
+				if value != nil {
+					b = append(b, strval(value))
+				}
+			}
+			return b
+		default:
+			if v == nil {
+				return []string{}
+			}
+
+			return []string{strval(v)}
+		}
+	}
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+	newSlice := make([]interface{}, 0, len(v))
+	for _, i := range v {
+		if i != nil {
+			newSlice = append(newSlice, i)
+		}
+	}
+	return newSlice
+}
+
+func strval(v interface{}) string {
+	switch v := v.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	case error:
+		return v.Error()
+	case fmt.Stringer:
+		return v.String()
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+func trunc(c int, s string) string {
+	if c < 0 && len(s)+c > 0 {
+		return s[len(s)+c:]
+	}
+	if c >= 0 && len(s) > c {
+		return s[:c]
+	}
+	return s
+}
+
+func join(sep string, v interface{}) string {
+	return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+	parts := strings.Split(orig, sep)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+	parts := strings.SplitN(orig, sep, n)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end is greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 00000000..b8e120e1 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore new file mode 100644 index 00000000..404365f6 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.gitignore @@ -0,0 +1,2 @@ +README.html +coverage.out diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml new file mode 100644 index 00000000..95be94af --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -0,0 +1,70 @@ +language: go +sudo: false +go: + - 1.8 + - 1.7.5 + - 1.7.4 + - 1.7.3 + - 1.7.2 + - 1.7.1 + - 1.7 + - tip + - 1.6.4 + - 1.6.3 + - 1.6.2 + - 1.6.1 + - 1.6 + - 1.5.4 + - 1.5.3 + - 1.5.2 + - 1.5.1 + - 1.5 + - 1.4.3 + - 1.4.2 + - 1.4.1 + - 1.4 + - 1.3.3 + - 1.3.2 + - 1.3.1 + - 1.3 + - 1.2.2 + - 1.2.1 + - 1.2 + - 1.1.2 + - 1.1.1 + - 1.1 +before_install: + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +notifications: + email: + on_success: never +matrix: + fast_finish: true + allow_failures: + - go: tip + - go: 1.6.4 + - go: 1.6.3 + - go: 1.6.2 + - go: 1.6.1 + - go: 1.6 + - go: 1.5.4 + - go: 1.5.3 + - go: 1.5.2 + - go: 1.5.1 + - go: 1.5 + - go: 1.4.3 + - go: 1.4.2 + - go: 1.4.1 + - go: 1.4 + - go: 1.3.3 + - go: 1.3.2 + - go: 1.3.1 + - go: 1.3 + - go: 1.2.2 + - go: 1.2.1 + - go: 1.2 + - go: 1.1.2 + - go: 1.1.1 + - go: 1.1 diff --git a/vendor/github.com/agext/levenshtein/BUILD 
b/vendor/github.com/agext/levenshtein/BUILD new file mode 100644 index 00000000..bc7fb027 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/BUILD @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "levenshtein", + srcs = [ + "levenshtein.go", + "params.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/agext/levenshtein", + importpath = "github.com/agext/levenshtein", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 00000000..716561d5 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 00000000..726c2afb --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 00000000..eaffaab9 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). 
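For orientation before the README and sources that follow: a minimal, self-contained sketch of how downstream code might call this package's API (`Distance`, `Similarity`, and `Match`, documented in the README below). The `main` wrapper is hypothetical and not part of the patch; the import path matches the `importpath` declared in the BUILD file above, and the commented results assume the default edit costs of 1.

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// A nil *Params selects the defaults: insertion, substitution and
	// deletion each cost 1, with no maximum-cost cut-off.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into 0..1 (1 = identical).
	// A non-zero MinScore lets the calculation stop early and report 0
	// for pairs that cannot reach the threshold.
	p := levenshtein.NewParams().MinScore(0.5)
	fmt.Println(levenshtein.Similarity("levenshtein", "levenstein", p)) // ~0.909

	// Match adds a Winkler-style common-prefix bonus once the similarity
	// clears the bonus threshold (0.7 by default).
	fmt.Println(levenshtein.Match("levenshtein", "levenstein", nil)) // ~0.945
}
```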
diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md
new file mode 100644
index 00000000..90509c2a
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/README.md
@@ -0,0 +1,38 @@
+# A Go package for calculating the Levenshtein distance between two strings
+
+[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)
+[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
+[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
+[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
+
+
+This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
+
+## Project Status
+
+v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on an "AS IS" basis.
+
+This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
+
+## Overview
+
+The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
+
+A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
+
+The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
+
+The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
+
+The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
+
+## Installation
+
+```
+go get github.com/agext/levenshtein
+```
+
+## License
+
+Package levenshtein is released under the Apache 2.0 license.
See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 00000000..df69ce70 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. 
+func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. + if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. 
+ if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. 
+ max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 00000000..a85727b3 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. +type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. 
+func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). +func (p *Params) BonusThreshold(v float64) *Params { + if v >= 0 { + p.bonusThreshold = v + } + return p +} diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/LICENSE b/vendor/github.com/apparentlymart/go-textseg/v13/LICENSE new file mode 100644 index 00000000..684b03b4 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/LICENSE @@ -0,0 +1,95 @@ +Copyright (c) 2017 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +--------- + +Unicode table generation programs are under a separate copyright and license: + +Copyright (c) 2014 Couchbase, Inc. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the +License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See the License for the specific language governing permissions +and limitations under the License. + +--------- + +Grapheme break data is provided as part of the Unicode character database, +copright 2016 Unicode, Inc, which is provided with the following license: + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2017 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. 
+ +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/BUILD b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/BUILD new file mode 100644 index 00000000..45669ace --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "textseg", + srcs = [ + "all_tokens.go", + "generate.go", + "grapheme_clusters.go", + "tables.go", + "utf8_seqs.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/apparentlymart/go-textseg/v13/textseg", + importpath = "github.com/apparentlymart/go-textseg/v13/textseg", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go new file mode 100644 index 00000000..5752e9ef --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. +func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl new file mode 100644 index 00000000..f2cb484a --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl @@ -0,0 +1,525 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: https://www.unicode.org/Public/13.0.0/ucd/emoji/emoji-data.txt +# +# It defines ["Extended_Pictographic"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine Emoji; + + Extended_Pictographic = + 0xC2 0xA9 #E0.6 [1] (©️) copyright + | 0xC2 0xAE #E0.6 [1] (®️) registered + | 0xE2 0x80 0xBC #E0.6 [1] (‼️) double exclamation mark + | 0xE2 0x81 0x89 #E0.6 [1] (⁉️) exclamation question ... + | 0xE2 0x84 0xA2 #E0.6 [1] (™️) trade mark + | 0xE2 0x84 0xB9 #E0.6 [1] (ℹ️) information + | 0xE2 0x86 0x94..0x99 #E0.6 [6] (↔️..↙️) left-right arrow..do... + | 0xE2 0x86 0xA9..0xAA #E0.6 [2] (↩️..↪️) right arrow curving ... + | 0xE2 0x8C 0x9A..0x9B #E0.6 [2] (⌚..⌛) watch..hourglass done + | 0xE2 0x8C 0xA8 #E1.0 [1] (⌨️) keyboard + | 0xE2 0x8E 0x88 #E0.0 [1] (⎈) HELM SYMBOL + | 0xE2 0x8F 0x8F #E1.0 [1] (⏏️) eject button + | 0xE2 0x8F 0xA9..0xAC #E0.6 [4] (⏩..⏬) fast-forward button..f... + | 0xE2 0x8F 0xAD..0xAE #E0.7 [2] (⏭️..⏮️) next track button..l... 
+ | 0xE2 0x8F 0xAF #E1.0 [1] (⏯️) play or pause button + | 0xE2 0x8F 0xB0 #E0.6 [1] (⏰) alarm clock + | 0xE2 0x8F 0xB1..0xB2 #E1.0 [2] (⏱️..⏲️) stopwatch..timer clock + | 0xE2 0x8F 0xB3 #E0.6 [1] (⏳) hourglass not done + | 0xE2 0x8F 0xB8..0xBA #E0.7 [3] (⏸️..⏺️) pause button..record... + | 0xE2 0x93 0x82 #E0.6 [1] (Ⓜ️) circled M + | 0xE2 0x96 0xAA..0xAB #E0.6 [2] (▪️..▫️) black small square..... + | 0xE2 0x96 0xB6 #E0.6 [1] (▶️) play button + | 0xE2 0x97 0x80 #E0.6 [1] (◀️) reverse button + | 0xE2 0x97 0xBB..0xBE #E0.6 [4] (◻️..◾) white medium square..... + | 0xE2 0x98 0x80..0x81 #E0.6 [2] (☀️..☁️) sun..cloud + | 0xE2 0x98 0x82..0x83 #E0.7 [2] (☂️..☃️) umbrella..snowman + | 0xE2 0x98 0x84 #E1.0 [1] (☄️) comet + | 0xE2 0x98 0x85 #E0.0 [1] (★) BLACK STAR + | 0xE2 0x98 0x87..0x8D #E0.0 [7] (☇..☍) LIGHTNING..OPPOSITION + | 0xE2 0x98 0x8E #E0.6 [1] (☎️) telephone + | 0xE2 0x98 0x8F..0x90 #E0.0 [2] (☏..☐) WHITE TELEPHONE..BALLO... + | 0xE2 0x98 0x91 #E0.6 [1] (☑️) check box with check + | 0xE2 0x98 0x92 #E0.0 [1] (☒) BALLOT BOX WITH X + | 0xE2 0x98 0x94..0x95 #E0.6 [2] (☔..☕) umbrella with rain dro... + | 0xE2 0x98 0x96..0x97 #E0.0 [2] (☖..☗) WHITE SHOGI PIECE..BLA... + | 0xE2 0x98 0x98 #E1.0 [1] (☘️) shamrock + | 0xE2 0x98 0x99..0x9C #E0.0 [4] (☙..☜) REVERSED ROTATED FLORA... + | 0xE2 0x98 0x9D #E0.6 [1] (☝️) index pointing up + | 0xE2 0x98 0x9E..0x9F #E0.0 [2] (☞..☟) WHITE RIGHT POINTING I... + | 0xE2 0x98 0xA0 #E1.0 [1] (☠️) skull and crossbones + | 0xE2 0x98 0xA1 #E0.0 [1] (☡) CAUTION SIGN + | 0xE2 0x98 0xA2..0xA3 #E1.0 [2] (☢️..☣️) radioactive..biohazard + | 0xE2 0x98 0xA4..0xA5 #E0.0 [2] (☤..☥) CADUCEUS..ANKH + | 0xE2 0x98 0xA6 #E1.0 [1] (☦️) orthodox cross + | 0xE2 0x98 0xA7..0xA9 #E0.0 [3] (☧..☩) CHI RHO..CROSS OF JERU... + | 0xE2 0x98 0xAA #E0.7 [1] (☪️) star and crescent + | 0xE2 0x98 0xAB..0xAD #E0.0 [3] (☫..☭) FARSI SYMBOL..HAMMER A... + | 0xE2 0x98 0xAE #E1.0 [1] (☮️) peace symbol + | 0xE2 0x98 0xAF #E0.7 [1] (☯️) yin yang + | 0xE2 0x98 0xB0..0xB7 #E0.0 [8] (☰..☷) TRIGRAM FOR HEAVEN..TR... + | 0xE2 0x98 0xB8..0xB9 #E0.7 [2] (☸️..☹️) wheel of dharma..fro... + | 0xE2 0x98 0xBA #E0.6 [1] (☺️) smiling face + | 0xE2 0x98 0xBB..0xBF #E0.0 [5] (☻..☿) BLACK SMILING FACE..ME... + | 0xE2 0x99 0x80 #E4.0 [1] (♀️) female sign + | 0xE2 0x99 0x81 #E0.0 [1] (♁) EARTH + | 0xE2 0x99 0x82 #E4.0 [1] (♂️) male sign + | 0xE2 0x99 0x83..0x87 #E0.0 [5] (♃..♇) JUPITER..PLUTO + | 0xE2 0x99 0x88..0x93 #E0.6 [12] (♈..♓) Aries..Pisces + | 0xE2 0x99 0x94..0x9E #E0.0 [11] (♔..♞) WHITE CHESS KING..BLAC... + | 0xE2 0x99 0x9F #E11.0 [1] (♟️) chess pawn + | 0xE2 0x99 0xA0 #E0.6 [1] (♠️) spade suit + | 0xE2 0x99 0xA1..0xA2 #E0.0 [2] (♡..♢) WHITE HEART SUIT..WHIT... + | 0xE2 0x99 0xA3 #E0.6 [1] (♣️) club suit + | 0xE2 0x99 0xA4 #E0.0 [1] (♤) WHITE SPADE SUIT + | 0xE2 0x99 0xA5..0xA6 #E0.6 [2] (♥️..♦️) heart suit..diamond ... + | 0xE2 0x99 0xA7 #E0.0 [1] (♧) WHITE CLUB SUIT + | 0xE2 0x99 0xA8 #E0.6 [1] (♨️) hot springs + | 0xE2 0x99 0xA9..0xBA #E0.0 [18] (♩..♺) QUARTER NOTE..RECYCLIN... + | 0xE2 0x99 0xBB #E0.6 [1] (♻️) recycling symbol + | 0xE2 0x99 0xBC..0xBD #E0.0 [2] (♼..♽) RECYCLED PAPER SYMBOL.... 
+ | 0xE2 0x99 0xBE #E11.0 [1] (♾️) infinity + | 0xE2 0x99 0xBF #E0.6 [1] (♿) wheelchair symbol + | 0xE2 0x9A 0x80..0x85 #E0.0 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 + | 0xE2 0x9A 0x90..0x91 #E0.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG + | 0xE2 0x9A 0x92 #E1.0 [1] (⚒️) hammer and pick + | 0xE2 0x9A 0x93 #E0.6 [1] (⚓) anchor + | 0xE2 0x9A 0x94 #E1.0 [1] (⚔️) crossed swords + | 0xE2 0x9A 0x95 #E4.0 [1] (⚕️) medical symbol + | 0xE2 0x9A 0x96..0x97 #E1.0 [2] (⚖️..⚗️) balance scale..alembic + | 0xE2 0x9A 0x98 #E0.0 [1] (⚘) FLOWER + | 0xE2 0x9A 0x99 #E1.0 [1] (⚙️) gear + | 0xE2 0x9A 0x9A #E0.0 [1] (⚚) STAFF OF HERMES + | 0xE2 0x9A 0x9B..0x9C #E1.0 [2] (⚛️..⚜️) atom symbol..fleur-d... + | 0xE2 0x9A 0x9D..0x9F #E0.0 [3] (⚝..⚟) OUTLINED WHITE STAR..T... + | 0xE2 0x9A 0xA0..0xA1 #E0.6 [2] (⚠️..⚡) warning..high voltage + | 0xE2 0x9A 0xA2..0xA6 #E0.0 [5] (⚢..⚦) DOUBLED FEMALE SIGN..M... + | 0xE2 0x9A 0xA7 #E13.0 [1] (⚧️) transgender symbol + | 0xE2 0x9A 0xA8..0xA9 #E0.0 [2] (⚨..⚩) VERTICAL MALE WITH STR... + | 0xE2 0x9A 0xAA..0xAB #E0.6 [2] (⚪..⚫) white circle..black ci... + | 0xE2 0x9A 0xAC..0xAF #E0.0 [4] (⚬..⚯) MEDIUM SMALL WHITE CIR... + | 0xE2 0x9A 0xB0..0xB1 #E1.0 [2] (⚰️..⚱️) coffin..funeral urn + | 0xE2 0x9A 0xB2..0xBC #E0.0 [11] (⚲..⚼) NEUTER..SESQUIQUADRATE + | 0xE2 0x9A 0xBD..0xBE #E0.6 [2] (⚽..⚾) soccer ball..baseball + | 0xE2 0x9A 0xBF..0xFF #E0.0 [5] (⚿..⛃) SQUARED KEY..BLACK DRA... + | 0xE2 0x9B 0x00..0x83 # + | 0xE2 0x9B 0x84..0x85 #E0.6 [2] (⛄..⛅) snowman without snow..... + | 0xE2 0x9B 0x86..0x87 #E0.0 [2] (⛆..⛇) RAIN..BLACK SNOWMAN + | 0xE2 0x9B 0x88 #E0.7 [1] (⛈️) cloud with lightning ... + | 0xE2 0x9B 0x89..0x8D #E0.0 [5] (⛉..⛍) TURNED WHITE SHOGI PIE... + | 0xE2 0x9B 0x8E #E0.6 [1] (⛎) Ophiuchus + | 0xE2 0x9B 0x8F #E0.7 [1] (⛏️) pick + | 0xE2 0x9B 0x90 #E0.0 [1] (⛐) CAR SLIDING + | 0xE2 0x9B 0x91 #E0.7 [1] (⛑️) rescue worker’s helmet + | 0xE2 0x9B 0x92 #E0.0 [1] (⛒) CIRCLED CROSSING LANES + | 0xE2 0x9B 0x93 #E0.7 [1] (⛓️) chains + | 0xE2 0x9B 0x94 #E0.6 [1] (⛔) no entry + | 0xE2 0x9B 0x95..0xA8 #E0.0 [20] (⛕..⛨) ALTERNATE ONE-WAY LEFT... + | 0xE2 0x9B 0xA9 #E0.7 [1] (⛩️) shinto shrine + | 0xE2 0x9B 0xAA #E0.6 [1] (⛪) church + | 0xE2 0x9B 0xAB..0xAF #E0.0 [5] (⛫..⛯) CASTLE..MAP SYMBOL FOR... + | 0xE2 0x9B 0xB0..0xB1 #E0.7 [2] (⛰️..⛱️) mountain..umbrella o... + | 0xE2 0x9B 0xB2..0xB3 #E0.6 [2] (⛲..⛳) fountain..flag in hole + | 0xE2 0x9B 0xB4 #E0.7 [1] (⛴️) ferry + | 0xE2 0x9B 0xB5 #E0.6 [1] (⛵) sailboat + | 0xE2 0x9B 0xB6 #E0.0 [1] (⛶) SQUARE FOUR CORNERS + | 0xE2 0x9B 0xB7..0xB9 #E0.7 [3] (⛷️..⛹️) skier..person bounci... + | 0xE2 0x9B 0xBA #E0.6 [1] (⛺) tent + | 0xE2 0x9B 0xBB..0xBC #E0.0 [2] (⛻..⛼) JAPANESE BANK SYMBOL..... + | 0xE2 0x9B 0xBD #E0.6 [1] (⛽) fuel pump + | 0xE2 0x9B 0xBE..0xFF #E0.0 [4] (⛾..✁) CUP ON BLACK SQUARE..U... + | 0xE2 0x9C 0x00..0x81 # + | 0xE2 0x9C 0x82 #E0.6 [1] (✂️) scissors + | 0xE2 0x9C 0x83..0x84 #E0.0 [2] (✃..✄) LOWER BLADE SCISSORS..... + | 0xE2 0x9C 0x85 #E0.6 [1] (✅) check mark button + | 0xE2 0x9C 0x88..0x8C #E0.6 [5] (✈️..✌️) airplane..victory hand + | 0xE2 0x9C 0x8D #E0.7 [1] (✍️) writing hand + | 0xE2 0x9C 0x8E #E0.0 [1] (✎) LOWER RIGHT PENCIL + | 0xE2 0x9C 0x8F #E0.6 [1] (✏️) pencil + | 0xE2 0x9C 0x90..0x91 #E0.0 [2] (✐..✑) UPPER RIGHT PENCIL..WH... 
+ | 0xE2 0x9C 0x92 #E0.6 [1] (✒️) black nib + | 0xE2 0x9C 0x94 #E0.6 [1] (✔️) check mark + | 0xE2 0x9C 0x96 #E0.6 [1] (✖️) multiply + | 0xE2 0x9C 0x9D #E0.7 [1] (✝️) latin cross + | 0xE2 0x9C 0xA1 #E0.7 [1] (✡️) star of David + | 0xE2 0x9C 0xA8 #E0.6 [1] (✨) sparkles + | 0xE2 0x9C 0xB3..0xB4 #E0.6 [2] (✳️..✴️) eight-spoked asteris... + | 0xE2 0x9D 0x84 #E0.6 [1] (❄️) snowflake + | 0xE2 0x9D 0x87 #E0.6 [1] (❇️) sparkle + | 0xE2 0x9D 0x8C #E0.6 [1] (❌) cross mark + | 0xE2 0x9D 0x8E #E0.6 [1] (❎) cross mark button + | 0xE2 0x9D 0x93..0x95 #E0.6 [3] (❓..❕) question mark..white e... + | 0xE2 0x9D 0x97 #E0.6 [1] (❗) exclamation mark + | 0xE2 0x9D 0xA3 #E1.0 [1] (❣️) heart exclamation + | 0xE2 0x9D 0xA4 #E0.6 [1] (❤️) red heart + | 0xE2 0x9D 0xA5..0xA7 #E0.0 [3] (❥..❧) ROTATED HEAVY BLACK HE... + | 0xE2 0x9E 0x95..0x97 #E0.6 [3] (➕..➗) plus..divide + | 0xE2 0x9E 0xA1 #E0.6 [1] (➡️) right arrow + | 0xE2 0x9E 0xB0 #E0.6 [1] (➰) curly loop + | 0xE2 0x9E 0xBF #E1.0 [1] (➿) double curly loop + | 0xE2 0xA4 0xB4..0xB5 #E0.6 [2] (⤴️..⤵️) right arrow curving ... + | 0xE2 0xAC 0x85..0x87 #E0.6 [3] (⬅️..⬇️) left arrow..down arrow + | 0xE2 0xAC 0x9B..0x9C #E0.6 [2] (⬛..⬜) black large square..wh... + | 0xE2 0xAD 0x90 #E0.6 [1] (⭐) star + | 0xE2 0xAD 0x95 #E0.6 [1] (⭕) hollow red circle + | 0xE3 0x80 0xB0 #E0.6 [1] (〰️) wavy dash + | 0xE3 0x80 0xBD #E0.6 [1] (〽️) part alternation mark + | 0xE3 0x8A 0x97 #E0.6 [1] (㊗️) Japanese “congratulat... + | 0xE3 0x8A 0x99 #E0.6 [1] (㊙️) Japanese “secret” button + | 0xF0 0x9F 0x80 0x80..0x83 #E0.0 [4] (🀀..🀃) MAHJONG TILE EAST W... + | 0xF0 0x9F 0x80 0x84 #E0.6 [1] (🀄) mahjong red dragon + | 0xF0 0x9F 0x80 0x85..0xFF #E0.0 [202] (🀅..🃎) MAHJONG TILE ... + | 0xF0 0x9F 0x81..0x82 0x00..0xFF # + | 0xF0 0x9F 0x83 0x00..0x8E # + | 0xF0 0x9F 0x83 0x8F #E0.6 [1] (🃏) joker + | 0xF0 0x9F 0x83 0x90..0xBF #E0.0 [48] (🃐..🃿) ..<... + | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S... + | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL + | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR... + | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t... + | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t... + | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type) + | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button + | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..🇥) MASK WORK SYMBOL..<... + | 0xF0 0x9F 0x87 0x00..0xA5 # + | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu... + | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (🈃..🈏) ..<... + | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char... + | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu... + | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite... + | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (🈼..🈿) ..<... + | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (🉉..🉏) ..<... + | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ... + | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (🉒..🋿) ..<... + | 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench... + | 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR... + | 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane + | 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR... + | 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure.... + | 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (🛭..🛯) ..<... + | 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite + | 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN... 
+ | 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship + | 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe + | 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer + | 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard + | 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw + | 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle... + | 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (🛽..🛿) ..<... + | 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) ..<... + | 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE..<... + | 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow... + | 0xF0 0x9F 0x9F 0xAC..0xBF #E0.0 [20] (🟬..🟿) ..<... + | 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (🠌..🠏) ..<... + | 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (🡈..🡏) ..<... + | 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (🡚..🡟) ..<... + | 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (🢈..🢏) ..<... + | 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (🢮..🣿) ..<... + | 0xF0 0x9F 0xA3 0x00..0xBF # + | 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers + | 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi... + | 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face..... + | 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross... + | 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture + | 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn... + | 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey... + | 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman + | 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal... + | 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing + | 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p... + | 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask + | 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal... + | 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma... + | 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone + | 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc + | 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes + | 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food + | 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin... + | 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face + | 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear + | 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold... + | 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face + | 0xF0 0x9F 0xA5 0xB9 #E0.0 [1] (🥹) + | 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face + | 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari + | 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe + | 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn + | 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid + | 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket + | 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan + | 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo + | 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster + | 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal + | 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane + | 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain + | 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan... 
+ | 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge + | 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt + | 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice + | 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea + | 0xF0 0x9F 0xA7 0x8C #E0.0 [1] (🧌) + | 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de... + | 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle..... + | 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar... + | 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.... + | 0xF0 0x9F 0xA9 0x00..0xAF # + | 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts + | 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal + | 0xF0 0x9F 0xA9 0xB5..0xB7 #E0.0 [3] (🩵..🩷) ..<... + | 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet... + | 0xF0 0x9F 0xA9 0xBB..0xBF #E0.0 [5] (🩻..🩿) ..<... + | 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute + | 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ... + | 0xF0 0x9F 0xAA 0x87..0x8F #E0.0 [9] (🪇..🪏) ..<... + | 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo + | 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock + | 0xF0 0x9F 0xAA 0xA9..0xAF #E0.0 [7] (🪩..🪯) ..<... + | 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather + | 0xF0 0x9F 0xAA 0xB7..0xBF #E0.0 [9] (🪷..🪿) ..<... + | 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p... + | 0xF0 0x9F 0xAB 0x83..0x8F #E0.0 [13] (🫃..🫏) ..<... + | 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot + | 0xF0 0x9F 0xAB 0x97..0xBF #E0.0 [41] (🫗..🫿) ..<... + | 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (🰀..🿽) 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 4: +//line NONE:1 + ts = p + +//line grapheme_clusters.go:3878 + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_graphclust_indicies[_trans]) + _eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +//line grapheme_clusters.rl:47 + + startPos = p + + case 1: +//line grapheme_clusters.rl:51 + + endPos = p + + case 5: +//line NONE:1 + te = p + 1 + + case 6: +//line grapheme_clusters.rl:55 + act = 3 + case 7: +//line grapheme_clusters.rl:55 + act = 4 + case 8: +//line grapheme_clusters.rl:55 + te 
= p + 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 9: +//line grapheme_clusters.rl:55 + te = p + 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 10: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 11: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 12: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 13: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 14: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 15: +//line grapheme_clusters.rl:55 + te = p + p-- + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 16: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 17: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 18: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 19: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 20: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 21: +//line grapheme_clusters.rl:55 + p = (te) - 1 + { + return endPos + 1, data[startPos : endPos+1], nil + } + case 22: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 3: + { + p = (te) - 1 + + return endPos + 1, data[startPos : endPos+1], nil + } + case 4: + { + p = (te) - 1 + + return endPos + 1, data[startPos : endPos+1], nil + } + } + +//line grapheme_clusters.go:4077 + } + } + + _again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +//line NONE:1 + ts = 0 + + case 3: +//line NONE:1 + act = 0 + +//line grapheme_clusters.go:4095 + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line grapheme_clusters.rl:117 + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl new file mode 100644 index 00000000..737db18b --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl @@ -0,0 +1,133 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) 
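+    # Ragel note: 'write data' below emits the machine's static tables, while
+    # the matching 'write init' and 'write exec' directives inside
+    # ScanGraphemeClusters expand into the scanner loop itself.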
+ + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. +func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + include Emoji "emoji_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Extended_Pictographic Extend*)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = Extended_Pictographic Extend* Extension; + ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
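+	// (utf8.DecodeRune yields a length of at least 1 for any non-empty
+	// input, including invalid UTF-8, so the scanner always advances.)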
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} \ No newline at end of file diff --git a/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl new file mode 100644 index 00000000..803dca19 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1609 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: https://www.unicode.org/Public/13.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x83 0x8D #Cf KAITHI NUMBER SIGN ABOVE + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + | 0xF0 0x91 0xA4 0xBF #Lo DIVES AKURU PREFIXED NASAL SIGN + | 0xF0 0x91 0xA5 0x81 #Lo DIVES AKURU INITIAL RA + | 0xF0 0x91 0xA8 0xBA #Lo ZANABAZAR SQUARE CLUSTER-INITIAL L... + | 0xF0 0x91 0xAA 0x84..0x89 #Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM... + | 0xF0 0x91 0xB5 0x86 #Lo MASARAM GONDI REPHA + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE + | 0xEF 0xBF 0xB0..0xB8 #Cn [9] .. + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x93 0x90 0xB0..0xB8 #Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JO... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. 
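+# Example invocation (added for illustration; the flags are parsed by the
+# OptionParser below, and the output path is arbitrary):
+#
+#   ruby unicode2ragel.rb -e utf8 \
+#        -u https://www.unicode.org/Public/13.0.0/ucd/auxiliary/GraphemeBreakProperty.txt \
+#        -m GraphemeCluster \
+#        -p Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,ZWJ \
+#        -o grapheme_clusters_table.rl
+#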
+# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + URI.open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} *#/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. 
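+#
+# For example, with the UTF8_BOUNDARIES defined above:
+#
+#   utf8_ranges(0x700..0x800) => [0x700..0x7ff, 0x800..0x800]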
+
+def utf8_ranges( range )
+  ranges = []
+  UTF8_BOUNDARIES.each do |max|
+    if range.begin <= max
+      if range.end <= max
+        ranges << range
+        return ranges
+      end
+
+      ranges << (range.begin .. max)
+      range = (max + 1) .. range.end
+    end
+  end
+  ranges
+end
+
+def build_range( start, stop )
+  size = start.size/2
+  left = size - 1
+  return [""] if size < 1
+
+  a = start[0..1]
+  b = stop[0..1]
+
+  ###
+  # Shared prefix
+
+  if a == b
+    return build_range(start[2..-1], stop[2..-1]).map do |elt|
+      "0x#{a} " + elt
+    end
+  end
+
+  ###
+  # Unshared prefix, end of run
+
+  return ["0x#{a}..0x#{b} "] if left.zero?
+
+  ###
+  # Unshared prefix, not end of run
+  # Range can be 0x123456..0x56789A
+  # Which is equivalent to:
+  #   0x123456 .. 0x12FFFF
+  #   0x130000 .. 0x55FFFF
+  #   0x560000 .. 0x56789A
+
+  ret = []
+  ret << build_range(start, a + "FF" * left)
+
+  ###
+  # Only generate middle range if need be.
+
+  if a.hex+1 != b.hex
+    max = to_hex(b.hex - 1)
+    max = "FF" if b == "FF"
+    ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
+  end
+
+  ###
+  # Don't generate last range if it is covered by first range
+
+  ret << build_range(b + "00" * left, stop) unless b == "FF"
+  ret.flatten!
+end
+
+def to_utf8( range )
+  utf8_ranges( range ).map do |r|
+    begin_enc = to_utf8_enc(r.begin)
+    end_enc = to_utf8_enc(r.end)
+    build_range begin_enc, end_enc
+  end.flatten!
+end
+
+##
+# Perform a 3-way comparison of the number of codepoints advertised by
+# the unicode spec for the given range, the originally parsed range,
+# and the resulting utf8 encoded range.
+
+def count_codepoints( code )
+  code.split(' ').inject(1) do |acc, elt|
+    if elt =~ /0x(.+)\.\.0x(.+)/
+      if @encoding == :utf8
+        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
+      else
+        acc * ($2.hex - $1.hex + 1)
+      end
+    else
+      acc
+    end
+  end
+end
+
+def is_valid?( range, desc, codes )
+  spec_count  = 1
+  spec_count  = $1.to_i if desc =~ /\[(\d+)\]/
+  range_count = range.end - range.begin + 1
+
+  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
+  sum == spec_count and sum == range_count
+end
+
+##
+# Generate the state machine to stdout
+
+def generate_machine( name, property )
+  pipe = " "
+  @output.puts "    #{name} = "
+  each_alpha( @chart_url, property ) do |range, desc|
+
+    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
+
+    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
+    #  is_valid? range, desc, codes
+
+    range_width = codes.map { |a| a.size }.max
+    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
+
+    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
+    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
+
+    if desc.size > desc_width
+      desc = desc[0..desc_width - 4] + "..."
+    end
+
+    codes.each_with_index do |r, idx|
+      desc = "" unless idx.zero?
+      code = "%-#{range_width}s" % r
+      @output.puts "  #{pipe} #{code} ##{desc}"
+      pipe = "|"
+    end
+  end
+  @output.puts "    ;"
+  @output.puts ""
+end
+
+@output.puts <<EOF
+# The following Ragel file was autogenerated with #{$0}
+# from: #{@chart_url}
+#
+# It defines #{properties.inspect}.
+#
+# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
+# and that your input is in utf8.
+
+%%{
+    machine #{machine_name};
+EOF
+
+properties.each { |x| generate_machine( x, x ) }
+
+@output.puts <<EOF
+}%%
+EOF
+### Reading an XML file
+
+Suppose you have a file on disk called `bookstore.xml` containing the
+following data:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<bookstore xmlns:p="urn:schemas-books-com:prices">
+
+  <book category="COOKING">
+    <title lang="en">Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <p:price>30.00</p:price>
+  </book>
+
+  <book category="CHILDREN">
+    <title lang="en">Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <p:price>29.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">XQuery Kick Start</title>
+    <author>James McGovern</author>
+    <author>Per Bothner</author>
+    <author>Kurt Cagle</author>
+    <author>James Linn</author>
+    <author>Vaidyanathan Nagarajan</author>
+    <year>2003</year>
+    <p:price>49.99</p:price>
+  </book>
+
+  <book category="WEB">
+    <title lang="en">Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <p:price>39.95</p:price>
+  </book>
+
+</bookstore>
+```
+
+This code reads the file's contents into an etree document.
+```go
+doc := etree.NewDocument()
+if err := doc.ReadFromFile("bookstore.xml"); err != nil {
+    panic(err)
+}
+```
+
+You can also read XML from a string, a byte slice, or an `io.Reader`.
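+
+For example, a short sketch that parses a document from a string (the literal
+here is a trimmed-down stand-in for the full bookstore file):
+
+```go
+xmlData := `<bookstore><book category="WEB"><title lang="en">Learning XML</title></book></bookstore>`
+
+doc := etree.NewDocument()
+if err := doc.ReadFromString(xmlData); err != nil {
+    panic(err)
+}
+```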
+
+### Processing elements and attributes
+
+This example illustrates several ways to access elements and attributes using
+etree selection queries.
+```go
+root := doc.SelectElement("bookstore")
+fmt.Println("ROOT element:", root.Tag)
+
+for _, book := range root.SelectElements("book") {
+    fmt.Println("CHILD element:", book.Tag)
+    if title := book.SelectElement("title"); title != nil {
+        lang := title.SelectAttrValue("lang", "unknown")
+        fmt.Printf("  TITLE: %s (%s)\n", title.Text(), lang)
+    }
+    for _, attr := range book.Attr {
+        fmt.Printf("  ATTR: %s=%s\n", attr.Key, attr.Value)
+    }
+}
+```
+Output:
+```
+ROOT element: bookstore
+CHILD element: book
+  TITLE: Everyday Italian (en)
+  ATTR: category=COOKING
+CHILD element: book
+  TITLE: Harry Potter (en)
+  ATTR: category=CHILDREN
+CHILD element: book
+  TITLE: XQuery Kick Start (en)
+  ATTR: category=WEB
+CHILD element: book
+  TITLE: Learning XML (en)
+  ATTR: category=WEB
+```
+
+### Path queries
+
+This example uses etree's path functions to select all book titles that fall
+into the category of 'WEB'. The double-slash prefix in the path causes the
+search for book elements to occur recursively; book elements may appear at any
+level of the XML hierarchy.
+```go
+for _, t := range doc.FindElements("//book[@category='WEB']/title") {
+    fmt.Println("Title:", t.Text())
+}
+```
+
+Output:
+```
+Title: XQuery Kick Start
+Title: Learning XML
+```
+
+This example finds the first book element under the root bookstore element and
+outputs the tag and text of each of its child elements.
+```go
+for _, e := range doc.FindElements("./bookstore/book[1]/*") {
+    fmt.Printf("%s: %s\n", e.Tag, e.Text())
+}
+```
+
+Output:
+```
+title: Everyday Italian
+author: Giada De Laurentiis
+year: 2005
+price: 30.00
+```
+
+This example finds all books with a price of 49.99 and outputs their titles.
+```go
+path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
+for _, e := range doc.FindElementsPath(path) {
+    fmt.Println(e.Text())
+}
+```
+
+Output:
+```
+XQuery Kick Start
+```
+
+Note that this example uses the FindElementsPath function, which takes as an
+argument a pre-compiled path object. Use precompiled paths when you plan to
+search with the same path more than once.
+
+### Other features
+
+These are just a few examples of the things the etree package can do. See the
+[documentation](http://godoc.org/github.com/beevik/etree) for a complete
+description of its capabilities.
+
+### Contributing
+
+This project accepts contributions. Just fork the repo and submit a pull
+request!
diff --git a/vendor/github.com/beevik/etree/RELEASE_NOTES.md b/vendor/github.com/beevik/etree/RELEASE_NOTES.md
new file mode 100644
index 00000000..4a2ce2ab
--- /dev/null
+++ b/vendor/github.com/beevik/etree/RELEASE_NOTES.md
@@ -0,0 +1,153 @@
+Release v1.2.0
+==============
+
+**New Features**
+
+* Add the ability to write XML fragments using Token WriteTo functions.
+* Add the ability to re-indent an XML element as though it were the root of
+  the document.
+* Add a ReadSettings option to preserve CDATA blocks when reading an XML
+  document.
+
+Release v1.1.4
+==============
+
+**New Features**
+
+* Add the ability to preserve whitespace in leaf elements during indent.
+* Add the ability to suppress a document-trailing newline during indent.
+* Add choice of XML attribute quoting style (single-quote or double-quote).
+
+**Removed Features**
+
+* Removed the CDATA preservation change introduced in v1.1.3. It was
+  implemented in a way that broke the ability to process XML documents
+  encoded using non-UTF8 character sets.
+
+Release v1.1.3
+==============
+
+* XML reads now preserve CDATA sections instead of converting them to
+  standard character data.
+
+Release v1.1.2
+==============
+
+* Fixed a path parsing bug.
+* The `Element.Text` function now handles comments embedded between
+  character data spans.
+
+Release v1.1.1
+==============
+
+* Updated go version in `go.mod` to 1.20
+
+Release v1.1.0
+==============
+
+**New Features**
+
+* New attribute helpers.
+  * Added the `Element.SortAttrs` method, which lexicographically sorts an
+    element's attributes by key.
+* New `ReadSettings` properties.
+  * Added `Entity` for the support of custom entity maps.
+* New `WriteSettings` properties.
+  * Added `UseCRLF` to allow the output of CR-LF newlines instead of the
+    default LF newlines. This is useful on Windows systems.
+* Additional support for text and CDATA sections.
+  * The `Element.Text` method now returns the concatenation of all consecutive
+    character data tokens immediately following an element's opening tag.
+  * Added `Element.SetCData` to replace the character data immediately
+    following an element's opening tag with a CDATA section.
+  * Added `Element.CreateCData` to create and add a CDATA section child
+    `CharData` token to an element.
+  * Added `Element.CreateText` to create and add a child text `CharData` token
+    to an element.
+  * Added `NewCData` to create a parentless CDATA section `CharData` token.
+  * Added `NewText` to create a parentless text `CharData` token.
+  * Added `CharData.IsCData` to detect if the token contains a CDATA section.
+  * Added `CharData.IsWhitespace` to detect if the token contains whitespace
+    inserted by one of the document Indent functions.
+  * Modified `Element.SetText` so that it replaces a run of consecutive
+    character data tokens following the element's opening tag (instead of just
+    the first one).
+* New "tail text" support.
+  * Added the `Element.Tail` method, which returns the text immediately
+    following an element's closing tag.
+  * Added the `Element.SetTail` method, which modifies the text immediately
+    following an element's closing tag.
+* New element child insertion and removal methods.
+  * Added the `Element.InsertChildAt` method, which inserts a new child token
+    before the specified child token index.
+  * Added the `Element.RemoveChildAt` method, which removes the child token at
+    the specified child token index.
+* New element and attribute queries.
+  * Added the `Element.Index` method, which returns the element's index within
+    its parent element's child token list.
+  * Added the `Element.NamespaceURI` method to return the namespace URI
+    associated with an element.
+  * Added the `Attr.NamespaceURI` method to return the namespace URI
+    associated with an attribute.
+  * Added the `Attr.Element` method to return the element that an attribute
+    belongs to.
+* New Path filter functions.
+  * Added `[local-name()='val']` to keep elements whose unprefixed tag matches
+    the desired value.
+  * Added `[name()='val']` to keep elements whose full tag matches the desired
+    value.
+  * Added `[namespace-prefix()='val']` to keep elements whose namespace prefix
+    matches the desired value.
+  * Added `[namespace-uri()='val']` to keep elements whose namespace URI
+    matches the desired value.
+
+**Bug Fixes**
+
+* A default XML `CharSetReader` is now used to prevent failed parsing of XML
+  documents using certain encodings.
+  ([Issue](https://github.com/beevik/etree/issues/53)).
+* All characters are now properly escaped according to XML parsing rules.
+  ([Issue](https://github.com/beevik/etree/issues/55)).
+* The `Document.Indent` and `Document.IndentTabs` functions no longer insert
+  empty string `CharData` tokens.
+
+**Deprecated**
+
+* `Element`
+  * The `InsertChild` method is deprecated. Use `InsertChildAt` instead.
+  * The `CreateCharData` method is deprecated. Use `CreateText` instead.
+* `CharData`
+  * The `NewCharData` method is deprecated. Use `NewText` instead.
+
+
+Release v1.0.1
+==============
+
+**Changes**
+
+* Added support for absolute etree Path queries. An absolute path begins with
+  `/` or `//` and begins its search from the element's document root.
+* Added [`GetPath`](https://godoc.org/github.com/beevik/etree#Element.GetPath)
+  and [`GetRelativePath`](https://godoc.org/github.com/beevik/etree#Element.GetRelativePath)
+  functions to the [`Element`](https://godoc.org/github.com/beevik/etree#Element)
+  type.
+
+**Breaking changes**
+
+* A path starting with `//` is now interpreted as an absolute path.
+  Previously, it was interpreted as a relative path starting from the element
+  whose
+  [`FindElement`](https://godoc.org/github.com/beevik/etree#Element.FindElement)
+  method was called. To remain compatible with this release, all paths
+  prefixed with `//` should be prefixed with `.//` when called from any
+  element other than the document's root.
+* [**edit 2/1/2019**]: Minor releases should not contain breaking changes.
+  Even though this breaking change was very minor, it was a mistake to include
+  it in this minor release. In the future, all breaking changes will be
+  limited to major releases (e.g., version 2.0.0).
+
+Release v1.0.0
+==============
+
+Initial release.
diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go
new file mode 100644
index 00000000..83df8b2f
--- /dev/null
+++ b/vendor/github.com/beevik/etree/etree.go
@@ -0,0 +1,1666 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package etree provides XML services through an Element Tree
+// abstraction.
+package etree
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+const (
+	// NoIndent is used with the IndentSettings record to remove all
+	// indenting.
+	NoIndent = -1
+)
+
+// ErrXML is returned when XML parsing fails due to incorrect formatting.
+var ErrXML = errors.New("etree: invalid XML format")
+
+// cdataPrefix is used to detect CDATA text when ReadSettings.PreserveCData is
+// true.
+var cdataPrefix = []byte("<![CDATA[")
+
+// ReadSettings determine the default behavior of the document's ReadFrom*
+// methods.
+type ReadSettings struct {
+	// CharsetReader to be passed to standard xml.Decoder. Default: nil.
+	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+	// Permissive allows input containing common mistakes such as missing tags
+	// or attribute values. Default: false.
+	Permissive bool
+
+	// Preserve CDATA character data blocks when reading XML (instead of
+	// converting it to normal text). Default: false.
+	PreserveCData bool
+
+	// Entity to be passed to standard xml.Decoder. Default: nil.
+	Entity map[string]string
+}
+
+// newReadSettings creates a default ReadSettings record.
+func newReadSettings() ReadSettings {
+	return ReadSettings{
+		CharsetReader: func(label string, input io.Reader) (io.Reader, error) {
+			return input, nil
+		},
+		Permissive: false,
+	}
+}
+
+// dup creates a duplicate of the ReadSettings object.
+func (s *ReadSettings) dup() ReadSettings {
+	ns := *s
+	if s.Entity != nil {
+		ns.Entity = make(map[string]string, len(s.Entity))
+		for k, v := range s.Entity {
+			ns.Entity[k] = v
+		}
+	}
+	return ns
+}
+
+// WriteSettings determine the behavior of the Document's WriteTo* methods.
+type WriteSettings struct {
+	// CanonicalEndTags forces the production of XML end tags, even for
+	// elements that have no child elements. Default: false.
+	CanonicalEndTags bool
+
+	// CanonicalText forces the production of XML character references for
+	// text data characters &, <, and >. If false, XML character references
+	// are also produced for " and '. Default: false.
+	CanonicalText bool
+
+	// CanonicalAttrVal forces the production of XML character references for
+	// attribute value characters &, < and ". If false, XML character
+	// references are also produced for > and '. Default: false.
+	CanonicalAttrVal bool
+
+	// AttrSingleQuote causes attributes to use single quotes (attr='example')
+	// instead of double quotes (attr="example") when set to true. Default:
+	// false.
+	AttrSingleQuote bool
+
+	// UseCRLF causes the document's Indent* methods to use a carriage return
+	// followed by a linefeed ("\r\n") when outputting a newline. If false,
+	// only a linefeed is used ("\n"). Default: false.
+	//
+	// Deprecated: UseCRLF is deprecated. Use IndentSettings.UseCRLF instead.
+	UseCRLF bool
+}
+
+// newWriteSettings creates a default WriteSettings record.
+func newWriteSettings() WriteSettings {
+	return WriteSettings{
+		CanonicalEndTags: false,
+		CanonicalText:    false,
+		CanonicalAttrVal: false,
+		AttrSingleQuote:  false,
+		UseCRLF:          false,
+	}
+}
+
+// dup creates a duplicate of the WriteSettings object.
+func (s *WriteSettings) dup() WriteSettings {
+	return *s
+}
+
+// IndentSettings determine the behavior of the Document's Indent* methods.
+type IndentSettings struct {
+	// Spaces indicates the number of spaces to insert for each level of
+	// indentation. Set to etree.NoIndent to remove all indentation. Ignored
+	// when UseTabs is true. Default: 4.
+	Spaces int
+
+	// UseTabs causes tabs to be used instead of spaces when indenting.
+	// Default: false.
+	UseTabs bool
+
+	// UseCRLF causes newlines to be written as a carriage return followed by
+	// a linefeed ("\r\n"). If false, only a linefeed character is output
+	// for a newline ("\n"). Default: false.
+	UseCRLF bool
+
+	// PreserveLeafWhitespace causes indent methods to preserve whitespace
+	// within XML elements containing only non-CDATA character data. Default:
+	// false.
+	PreserveLeafWhitespace bool
+
+	// SuppressTrailingWhitespace suppresses the generation of trailing
+	// whitespace characters (such as newlines) at the end of the indented
+	// document. Default: false.
+	SuppressTrailingWhitespace bool
+}
+
+// NewIndentSettings creates a default IndentSettings record.
+func NewIndentSettings() *IndentSettings {
+	return &IndentSettings{
+		Spaces:                     4,
+		UseTabs:                    false,
+		UseCRLF:                    false,
+		PreserveLeafWhitespace:     false,
+		SuppressTrailingWhitespace: false,
+	}
+}
+
+type indentFunc func(depth int) string
+
+func getIndentFunc(s *IndentSettings) indentFunc {
+	if s.UseTabs {
+		if s.UseCRLF {
+			return func(depth int) string { return indentCRLF(depth, indentTabs) }
+		} else {
+			return func(depth int) string { return indentLF(depth, indentTabs) }
+		}
+	} else {
+		if s.Spaces < 0 {
+			return func(depth int) string { return "" }
+		} else if s.UseCRLF {
+			return func(depth int) string { return indentCRLF(depth*s.Spaces, indentSpaces) }
+		} else {
+			return func(depth int) string { return indentLF(depth*s.Spaces, indentSpaces) }
+		}
+	}
+}
+
+// Writer is the interface that wraps the Write* methods called by each token
+// type's WriteTo function.
+type Writer interface {
+	io.StringWriter
+	io.ByteWriter
+	io.Writer
+}
+
+// A Token is an interface type used to represent XML elements, character
+// data, CDATA sections, XML comments, XML directives, and XML processing
+// instructions.
+type Token interface {
+	Parent() *Element
+	Index() int
+	WriteTo(w Writer, s *WriteSettings)
+	dup(parent *Element) Token
+	setParent(parent *Element)
+	setIndex(index int)
+}
+
+// A Document is a container holding a complete XML tree.
+//
+// A document has a single embedded element, which contains zero or more child
+// tokens, one of which is usually the root element. The embedded element may
+// include other children such as processing instruction tokens or character
+// data tokens. The document's embedded element is never directly serialized;
+// only its children are.
+// +// A document also contains read and write settings, which influence the way +// the document is deserialized, serialized, and indented. +type Document struct { + Element + ReadSettings ReadSettings + WriteSettings WriteSettings +} + +// An Element represents an XML element, its attributes, and its child tokens. +type Element struct { + Space, Tag string // namespace prefix and tag + Attr []Attr // key-value attribute pairs + Child []Token // child tokens (elements, comments, etc.) + parent *Element // parent element + index int // token index in parent's children +} + +// An Attr represents a key-value attribute within an XML element. +type Attr struct { + Space, Key string // The attribute's namespace prefix and key + Value string // The attribute value string + element *Element // element containing the attribute +} + +// charDataFlags are used with CharData tokens to store additional settings. +type charDataFlags uint8 + +const ( + // The CharData contains only whitespace. + whitespaceFlag charDataFlags = 1 << iota + + // The CharData contains a CDATA section. + cdataFlag +) + +// CharData may be used to represent simple text data or a CDATA section +// within an XML document. The Data property should never be modified +// directly; use the SetData method instead. +type CharData struct { + Data string // the simple text or CDATA section content + parent *Element + index int + flags charDataFlags +} + +// A Comment represents an XML comment. +type Comment struct { + Data string // the comment's text + parent *Element + index int +} + +// A Directive represents an XML directive. +type Directive struct { + Data string // the directive string + parent *Element + index int +} + +// A ProcInst represents an XML processing instruction. +type ProcInst struct { + Target string // the processing instruction target + Inst string // the processing instruction value + parent *Element + index int +} + +// NewDocument creates an XML document without a root element. +func NewDocument() *Document { + return &Document{ + Element: Element{Child: make([]Token, 0)}, + ReadSettings: newReadSettings(), + WriteSettings: newWriteSettings(), + } +} + +// NewDocumentWithRoot creates an XML document and sets the element 'e' as its +// root element. If the element 'e' is already part of another document, it is +// first removed from its existing document. +func NewDocumentWithRoot(e *Element) *Document { + d := NewDocument() + d.SetRoot(e) + return d +} + +// Copy returns a recursive, deep copy of the document. +func (d *Document) Copy() *Document { + return &Document{ + Element: *(d.Element.dup(nil).(*Element)), + ReadSettings: d.ReadSettings.dup(), + WriteSettings: d.WriteSettings.dup(), + } +} + +// Root returns the root element of the document. It returns nil if there is +// no root element. +func (d *Document) Root() *Element { + for _, t := range d.Child { + if c, ok := t.(*Element); ok { + return c + } + } + return nil +} + +// SetRoot replaces the document's root element with the element 'e'. If the +// document already has a root element when this function is called, then the +// existing root element is unbound from the document. If the element 'e' is +// part of another document, then it is unbound from the other document. +func (d *Document) SetRoot(e *Element) { + if e.parent != nil { + e.parent.RemoveChild(e) + } + + // If there is already a root element, replace it. 
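+	// Only the first child element token is treated as the root; tokens
+	// preceding it, such as an XML-declaration ProcInst, are left in place.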
+	p := &d.Element
+	for i, t := range p.Child {
+		if _, ok := t.(*Element); ok {
+			t.setParent(nil)
+			t.setIndex(-1)
+			p.Child[i] = e
+			e.setParent(p)
+			e.setIndex(i)
+			return
+		}
+	}
+
+	// No existing root element, so add it.
+	p.addChild(e)
+}
+
+// ReadFrom reads XML from the reader 'r' into this document. The function
+// returns the number of bytes read and any error encountered.
+func (d *Document) ReadFrom(r io.Reader) (n int64, err error) {
+	return d.Element.readFrom(r, d.ReadSettings)
+}
+
+// ReadFromFile reads XML from a local file at path 'filepath' into this
+// document.
+func (d *Document) ReadFromFile(filepath string) error {
+	f, err := os.Open(filepath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = d.ReadFrom(f)
+	return err
+}
+
+// ReadFromBytes reads XML from the byte slice 'b' into this document.
+func (d *Document) ReadFromBytes(b []byte) error {
+	_, err := d.ReadFrom(bytes.NewReader(b))
+	return err
+}
+
+// ReadFromString reads XML from the string 's' into this document.
+func (d *Document) ReadFromString(s string) error {
+	_, err := d.ReadFrom(strings.NewReader(s))
+	return err
+}
+
+// WriteTo serializes the document out to the writer 'w'. The function returns
+// the number of bytes written and any error encountered.
+func (d *Document) WriteTo(w io.Writer) (n int64, err error) {
+	xw := newXmlWriter(w)
+	b := bufio.NewWriter(xw)
+	for _, c := range d.Child {
+		c.WriteTo(b, &d.WriteSettings)
+	}
+	err, n = b.Flush(), xw.bytes
+	return
+}
+
+// WriteToFile serializes the document out to the file at path 'filepath'.
+func (d *Document) WriteToFile(filepath string) error {
+	f, err := os.Create(filepath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = d.WriteTo(f)
+	return err
+}
+
+// WriteToBytes serializes this document into a slice of bytes.
+func (d *Document) WriteToBytes() (b []byte, err error) {
+	var buf bytes.Buffer
+	if _, err = d.WriteTo(&buf); err != nil {
+		return
+	}
+	return buf.Bytes(), nil
+}
+
+// WriteToString serializes this document into a string.
+func (d *Document) WriteToString() (s string, err error) {
+	var b []byte
+	if b, err = d.WriteToBytes(); err != nil {
+		return
+	}
+	return string(b), nil
+}
+
+// Indent modifies the document's element tree by inserting character data
+// tokens containing newlines and spaces for indentation. The amount of
+// indentation per depth level is given by the 'spaces' parameter. Other than
+// the number of spaces, default IndentSettings are used.
+func (d *Document) Indent(spaces int) {
+	s := NewIndentSettings()
+	s.Spaces = spaces
+	d.IndentWithSettings(s)
+}
+
+// IndentTabs modifies the document's element tree by inserting CharData
+// tokens containing newlines and tabs for indentation. One tab is used per
+// indentation level. Other than the use of tabs, default IndentSettings
+// are used.
+func (d *Document) IndentTabs() {
+	s := NewIndentSettings()
+	s.UseTabs = true
+	d.IndentWithSettings(s)
+}
+
+// IndentWithSettings modifies the document's element tree by inserting
+// character data tokens containing newlines and indentation. The behavior
+// of the indentation algorithm is configured by the indent settings.
+func (d *Document) IndentWithSettings(s *IndentSettings) {
+	// WriteSettings.UseCRLF is deprecated. Until removed from the package, it
+	// overrides IndentSettings.UseCRLF when true.
+	if d.WriteSettings.UseCRLF {
+		s.UseCRLF = true
+	}
+
+	d.Element.indent(0, getIndentFunc(s), s)
+
+	if s.SuppressTrailingWhitespace {
+		d.Element.stripTrailingWhitespace()
+	}
+}
+
+// Unindent modifies the document's element tree by removing character data
+// tokens containing only whitespace. Other than the removal of indentation,
+// default IndentSettings are used.
+func (d *Document) Unindent() {
+	s := NewIndentSettings()
+	s.Spaces = NoIndent
+	d.IndentWithSettings(s)
+}
+
+// NewElement creates an unparented element with the specified tag (i.e.,
+// name). The tag may include a namespace prefix followed by a colon.
+func NewElement(tag string) *Element {
+	space, stag := spaceDecompose(tag)
+	return newElement(space, stag, nil)
+}
+
+// newElement is a helper function that creates an element and binds it to
+// a parent element if possible.
+func newElement(space, tag string, parent *Element) *Element {
+	e := &Element{
+		Space:  space,
+		Tag:    tag,
+		Attr:   make([]Attr, 0),
+		Child:  make([]Token, 0),
+		parent: parent,
+		index:  -1,
+	}
+	if parent != nil {
+		parent.addChild(e)
+	}
+	return e
+}
+
+// Copy creates a recursive, deep copy of the element and all its attributes
+// and children. The returned element has no parent but can be parented to
+// another element using AddChild, or added to a document with SetRoot or
+// NewDocumentWithRoot.
+func (e *Element) Copy() *Element {
+	return e.dup(nil).(*Element)
+}
+
+// FullTag returns the element e's complete tag, including namespace prefix if
+// present.
+func (e *Element) FullTag() string {
+	if e.Space == "" {
+		return e.Tag
+	}
+	return e.Space + ":" + e.Tag
+}
+
+// NamespaceURI returns the XML namespace URI associated with the element. If
+// the element is part of the XML default namespace, NamespaceURI returns the
+// empty string.
+func (e *Element) NamespaceURI() string {
+	if e.Space == "" {
+		return e.findDefaultNamespaceURI()
+	}
+	return e.findLocalNamespaceURI(e.Space)
+}
+
+// findLocalNamespaceURI finds the namespace URI corresponding to the
+// requested prefix.
+func (e *Element) findLocalNamespaceURI(prefix string) string {
+	for _, a := range e.Attr {
+		if a.Space == "xmlns" && a.Key == prefix {
+			return a.Value
+		}
+	}
+
+	if e.parent == nil {
+		return ""
+	}
+
+	return e.parent.findLocalNamespaceURI(prefix)
+}
+
+// findDefaultNamespaceURI finds the default namespace URI of the element.
+func (e *Element) findDefaultNamespaceURI() string {
+	for _, a := range e.Attr {
+		if a.Space == "" && a.Key == "xmlns" {
+			return a.Value
+		}
+	}
+
+	if e.parent == nil {
+		return ""
+	}
+
+	return e.parent.findDefaultNamespaceURI()
+}
+
+// namespacePrefix returns the namespace prefix associated with the element.
+func (e *Element) namespacePrefix() string {
+	return e.Space
+}
+
+// name returns the tag associated with the element.
+func (e *Element) name() string {
+	return e.Tag
+}
+
+// Text returns all character data immediately following the element's opening
+// tag.
+func (e *Element) Text() string {
+	if len(e.Child) == 0 {
+		return ""
+	}
+
+	text := ""
+	for _, ch := range e.Child {
+		if cd, ok := ch.(*CharData); ok {
+			if text == "" {
+				text = cd.Data
+			} else {
+				text += cd.Data
+			}
+		} else if _, ok := ch.(*Comment); ok {
+			// ignore
+		} else {
+			break
+		}
+	}
+	return text
+}
+
+// SetText replaces all character data immediately following an element's
+// opening tag with the requested string.
+func (e *Element) SetText(text string) { + e.replaceText(0, text, 0) +} + +// SetCData replaces all character data immediately following an element's +// opening tag with a CDATA section. +func (e *Element) SetCData(text string) { + e.replaceText(0, text, cdataFlag) +} + +// Tail returns all character data immediately following the element's end +// tag. +func (e *Element) Tail() string { + if e.Parent() == nil { + return "" + } + + p := e.Parent() + i := e.Index() + + text := "" + for _, ch := range p.Child[i+1:] { + if cd, ok := ch.(*CharData); ok { + if text == "" { + text = cd.Data + } else { + text += cd.Data + } + } else { + break + } + } + return text +} + +// SetTail replaces all character data immediately following the element's end +// tag with the requested string. +func (e *Element) SetTail(text string) { + if e.Parent() == nil { + return + } + + p := e.Parent() + p.replaceText(e.Index()+1, text, 0) +} + +// replaceText is a helper function that replaces a series of chardata tokens +// starting at index i with the requested text. +func (e *Element) replaceText(i int, text string, flags charDataFlags) { + end := e.findTermCharDataIndex(i) + + switch { + case end == i: + if text != "" { + // insert a new chardata token at index i + cd := newCharData(text, flags, nil) + e.InsertChildAt(i, cd) + } + + case end == i+1: + if text == "" { + // remove the chardata token at index i + e.RemoveChildAt(i) + } else { + // replace the first and only character token at index i + cd := e.Child[i].(*CharData) + cd.Data, cd.flags = text, flags + } + + default: + if text == "" { + // remove all chardata tokens starting from index i + copy(e.Child[i:], e.Child[end:]) + removed := end - i + e.Child = e.Child[:len(e.Child)-removed] + for j := i; j < len(e.Child); j++ { + e.Child[j].setIndex(j) + } + } else { + // replace the first chardata token at index i and remove all + // subsequent chardata tokens + cd := e.Child[i].(*CharData) + cd.Data, cd.flags = text, flags + copy(e.Child[i+1:], e.Child[end:]) + removed := end - (i + 1) + e.Child = e.Child[:len(e.Child)-removed] + for j := i + 1; j < len(e.Child); j++ { + e.Child[j].setIndex(j) + } + } + } +} + +// findTermCharDataIndex finds the index of the first child token that isn't +// a CharData token. It starts from the requested start index. +func (e *Element) findTermCharDataIndex(start int) int { + for i := start; i < len(e.Child); i++ { + if _, ok := e.Child[i].(*CharData); !ok { + return i + } + } + return len(e.Child) +} + +// CreateElement creates a new element with the specified tag (i.e., name) and +// adds it as the last child token of this element. The tag may include a +// prefix followed by a colon. +func (e *Element) CreateElement(tag string) *Element { + space, stag := spaceDecompose(tag) + return newElement(space, stag, e) +} + +// AddChild adds the token 't' as the last child of the element. If token 't' +// was already the child of another element, it is first removed from its +// parent element. +func (e *Element) AddChild(t Token) { + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + e.addChild(t) +} + +// InsertChild inserts the token 't' into this element's list of children just +// before the element's existing child token 'ex'. If the existing element +// 'ex' does not appear in this element's list of child tokens, then 't' is +// added to the end of this element's list of child tokens. If token 't' is +// already the child of another element, it is first removed from the other +// element's list of child tokens. 
+// +// Deprecated: InsertChild is deprecated. Use InsertChildAt instead. +func (e *Element) InsertChild(ex Token, t Token) { + if ex == nil || ex.Parent() != e { + e.AddChild(t) + return + } + + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + + t.setParent(e) + + i := ex.Index() + e.Child = append(e.Child, nil) + copy(e.Child[i+1:], e.Child[i:]) + e.Child[i] = t + + for j := i; j < len(e.Child); j++ { + e.Child[j].setIndex(j) + } +} + +// InsertChildAt inserts the token 't' into this element's list of child +// tokens just before the requested 'index'. If the index is greater than or +// equal to the length of the list of child tokens, then the token 't' is +// added to the end of the list of child tokens. +func (e *Element) InsertChildAt(index int, t Token) { + if index >= len(e.Child) { + e.AddChild(t) + return + } + + if t.Parent() != nil { + if t.Parent() == e && t.Index() > index { + index-- + } + t.Parent().RemoveChild(t) + } + + t.setParent(e) + + e.Child = append(e.Child, nil) + copy(e.Child[index+1:], e.Child[index:]) + e.Child[index] = t + + for j := index; j < len(e.Child); j++ { + e.Child[j].setIndex(j) + } +} + +// RemoveChild attempts to remove the token 't' from this element's list of +// child tokens. If the token 't' was a child of this element, then it is +// removed and returned. Otherwise, nil is returned. +func (e *Element) RemoveChild(t Token) Token { + if t.Parent() != e { + return nil + } + return e.RemoveChildAt(t.Index()) +} + +// RemoveChildAt removes the child token appearing in slot 'index' of this +// element's list of child tokens. The removed child token is then returned. +// If the index is out of bounds, no child is removed and nil is returned. +func (e *Element) RemoveChildAt(index int) Token { + if index >= len(e.Child) { + return nil + } + + t := e.Child[index] + for j := index + 1; j < len(e.Child); j++ { + e.Child[j].setIndex(j - 1) + } + e.Child = append(e.Child[:index], e.Child[index+1:]...) + t.setIndex(-1) + t.setParent(nil) + return t +} + +// ReadFrom reads XML from the reader 'ri' and stores the result as a new +// child of this element. 
+func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) { + var r xmlReader + var pr *xmlPeekReader + if settings.PreserveCData { + pr = newXmlPeekReader(ri) + r = pr + } else { + r = newXmlSimpleReader(ri) + } + + dec := xml.NewDecoder(r) + dec.CharsetReader = settings.CharsetReader + dec.Strict = !settings.Permissive + dec.Entity = settings.Entity + + var stack stack + stack.push(e) + for { + if pr != nil { + pr.PeekPrepare(dec.InputOffset(), len(cdataPrefix)) + } + + t, err := dec.RawToken() + + switch { + case err == io.EOF: + if len(stack.data) != 1 { + return r.Bytes(), ErrXML + } + return r.Bytes(), nil + case err != nil: + return r.Bytes(), err + case stack.empty(): + return r.Bytes(), ErrXML + } + + top := stack.peek().(*Element) + + switch t := t.(type) { + case xml.StartElement: + e := newElement(t.Name.Space, t.Name.Local, top) + for _, a := range t.Attr { + e.createAttr(a.Name.Space, a.Name.Local, a.Value, e) + } + stack.push(e) + case xml.EndElement: + if top.Tag != t.Name.Local || top.Space != t.Name.Space { + return r.Bytes(), ErrXML + } + stack.pop() + case xml.CharData: + data := string(t) + var flags charDataFlags + if pr != nil { + peekBuf := pr.PeekFinalize() + if bytes.Equal(peekBuf, cdataPrefix) { + flags = cdataFlag + } else if isWhitespace(data) { + flags = whitespaceFlag + } + } else { + if isWhitespace(data) { + flags = whitespaceFlag + } + } + newCharData(data, flags, top) + case xml.Comment: + newComment(string(t), top) + case xml.Directive: + newDirective(string(t), top) + case xml.ProcInst: + newProcInst(t.Target, string(t.Inst), top) + } + } +} + +// SelectAttr finds an element attribute matching the requested 'key' and, if +// found, returns a pointer to the matching attribute. The function returns +// nil if no matching attribute is found. The key may include a namespace +// prefix followed by a colon. +func (e *Element) SelectAttr(key string) *Attr { + space, skey := spaceDecompose(key) + for i, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return &e.Attr[i] + } + } + return nil +} + +// SelectAttrValue finds an element attribute matching the requested 'key' and +// returns its value if found. If no matching attribute is found, the function +// returns the 'dflt' value instead. The key may include a namespace prefix +// followed by a colon. +func (e *Element) SelectAttrValue(key, dflt string) string { + space, skey := spaceDecompose(key) + for _, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return a.Value + } + } + return dflt +} + +// ChildElements returns all elements that are children of this element. +func (e *Element) ChildElements() []*Element { + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok { + elements = append(elements, c) + } + } + return elements +} + +// SelectElement returns the first child element with the given 'tag' (i.e., +// name). The function returns nil if no child element matching the tag is +// found. The tag may include a namespace prefix followed by a colon. +func (e *Element) SelectElement(tag string) *Element { + space, stag := spaceDecompose(tag) + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + return c + } + } + return nil +} + +// SelectElements returns a slice of all child elements with the given 'tag' +// (i.e., name). The tag may include a namespace prefix followed by a colon. 
+func (e *Element) SelectElements(tag string) []*Element { + space, stag := spaceDecompose(tag) + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + elements = append(elements, c) + } + } + return elements +} + +// FindElement returns the first element matched by the XPath-like 'path' +// string. The function returns nil if no child element is found using the +// path. It panics if an invalid path string is supplied. +func (e *Element) FindElement(path string) *Element { + return e.FindElementPath(MustCompilePath(path)) +} + +// FindElementPath returns the first element matched by the 'path' object. The +// function returns nil if no element is found using the path. +func (e *Element) FindElementPath(path Path) *Element { + p := newPather() + elements := p.traverse(e, path) + if len(elements) > 0 { + return elements[0] + } + return nil +} + +// FindElements returns a slice of elements matched by the XPath-like 'path' +// string. The function returns nil if no child element is found using the +// path. It panics if an invalid path string is supplied. +func (e *Element) FindElements(path string) []*Element { + return e.FindElementsPath(MustCompilePath(path)) +} + +// FindElementsPath returns a slice of elements matched by the 'path' object. +func (e *Element) FindElementsPath(path Path) []*Element { + p := newPather() + return p.traverse(e, path) +} + +// GetPath returns the absolute path of the element. The absolute path is the +// full path from the document's root. +func (e *Element) GetPath() string { + path := []string{} + for seg := e; seg != nil; seg = seg.Parent() { + if seg.Tag != "" { + path = append(path, seg.Tag) + } + } + + // Reverse the path. + for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + + return "/" + strings.Join(path, "/") +} + +// GetRelativePath returns the path of this element relative to the 'source' +// element. If the two elements are not part of the same element tree, then +// the function returns the empty string. +func (e *Element) GetRelativePath(source *Element) string { + var path []*Element + + if source == nil { + return "" + } + + // Build a reverse path from the element toward the root. Stop if the + // source element is encountered. + var seg *Element + for seg = e; seg != nil && seg != source; seg = seg.Parent() { + path = append(path, seg) + } + + // If we found the source element, reverse the path and compose the + // string. + if seg == source { + if len(path) == 0 { + return "." + } + parts := []string{} + for i := len(path) - 1; i >= 0; i-- { + parts = append(parts, path[i].Tag) + } + return "./" + strings.Join(parts, "/") + } + + // The source wasn't encountered, so climb from the source element toward + // the root of the tree until an element in the reversed path is + // encountered. + + findPathIndex := func(e *Element, path []*Element) int { + for i, ee := range path { + if e == ee { + return i + } + } + return -1 + } + + climb := 0 + for seg = source; seg != nil; seg = seg.Parent() { + i := findPathIndex(seg, path) + if i >= 0 { + path = path[:i] // truncate at found segment + break + } + climb++ + } + + // No element in the reversed path was encountered, so the two elements + // must not be part of the same tree. + if seg == nil { + return "" + } + + // Reverse the (possibly truncated) path and prepend ".." segments to + // climb. 
+ parts := []string{} + for i := 0; i < climb; i++ { + parts = append(parts, "..") + } + for i := len(path) - 1; i >= 0; i-- { + parts = append(parts, path[i].Tag) + } + return strings.Join(parts, "/") +} + +// IndentWithSettings modifies the element and its child tree by inserting +// character data tokens containing newlines and indentation. The behavior of +// the indentation algorithm is configured by the indent settings. Because +// this function indents the element as if it were at the root of a document, +// it is most useful when called just before writing the element as an XML +// fragment using WriteTo. +func (e *Element) IndentWithSettings(s *IndentSettings) { + e.indent(1, getIndentFunc(s), s) +} + +// indent recursively inserts proper indentation between an XML element's +// child tokens. +func (e *Element) indent(depth int, indent indentFunc, s *IndentSettings) { + e.stripIndent(s) + n := len(e.Child) + if n == 0 { + return + } + + oldChild := e.Child + e.Child = make([]Token, 0, n*2+1) + isCharData, firstNonCharData := false, true + for _, c := range oldChild { + // Insert NL+indent before child if it's not character data. + // Exceptions: when it's the first non-character-data child, or when + // the child is at root depth. + _, isCharData = c.(*CharData) + if !isCharData { + if !firstNonCharData || depth > 0 { + s := indent(depth) + if s != "" { + newCharData(s, whitespaceFlag, e) + } + } + firstNonCharData = false + } + + e.addChild(c) + + // Recursively process child elements. + if ce, ok := c.(*Element); ok { + ce.indent(depth+1, indent, s) + } + } + + // Insert NL+indent before the last child. + if !isCharData { + if !firstNonCharData || depth > 0 { + s := indent(depth - 1) + if s != "" { + newCharData(s, whitespaceFlag, e) + } + } + } +} + +// stripIndent removes any previously inserted indentation. +func (e *Element) stripIndent(s *IndentSettings) { + // Count the number of non-indent child tokens + n := len(e.Child) + for _, c := range e.Child { + if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { + n-- + } + } + if n == len(e.Child) { + return + } + if n == 0 && len(e.Child) == 1 && s.PreserveLeafWhitespace { + return + } + + // Strip out indent CharData + newChild := make([]Token, n) + j := 0 + for _, c := range e.Child { + if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { + continue + } + newChild[j] = c + newChild[j].setIndex(j) + j++ + } + e.Child = newChild +} + +// stripTrailingWhitespace removes any trailing whitespace CharData tokens +// from the element's children. +func (e *Element) stripTrailingWhitespace() { + for i := len(e.Child) - 1; i >= 0; i-- { + if cd, ok := e.Child[i].(*CharData); !ok || !cd.IsWhitespace() { + e.Child = e.Child[:i+1] + return + } + } +} + +// dup duplicates the element. +func (e *Element) dup(parent *Element) Token { + ne := &Element{ + Space: e.Space, + Tag: e.Tag, + Attr: make([]Attr, len(e.Attr)), + Child: make([]Token, len(e.Child)), + parent: parent, + index: e.index, + } + for i, t := range e.Child { + ne.Child[i] = t.dup(ne) + } + copy(ne.Attr, e.Attr) + return ne +} + +// Parent returns this element's parent element. It returns nil if this +// element has no parent. +func (e *Element) Parent() *Element { + return e.parent +} + +// Index returns the index of this element within its parent element's +// list of child tokens. If this element has no parent, then the function +// returns -1. +func (e *Element) Index() int { + return e.index +} + +// WriteTo serializes the element to the writer w. 
+func (e *Element) WriteTo(w Writer, s *WriteSettings) { + w.WriteByte('<') + w.WriteString(e.FullTag()) + for _, a := range e.Attr { + w.WriteByte(' ') + a.WriteTo(w, s) + } + if len(e.Child) > 0 { + w.WriteByte('>') + for _, c := range e.Child { + c.WriteTo(w, s) + } + w.Write([]byte{'<', '/'}) + w.WriteString(e.FullTag()) + w.WriteByte('>') + } else { + if s.CanonicalEndTags { + w.Write([]byte{'>', '<', '/'}) + w.WriteString(e.FullTag()) + w.WriteByte('>') + } else { + w.Write([]byte{'/', '>'}) + } + } +} + +// setParent replaces this element token's parent. +func (e *Element) setParent(parent *Element) { + e.parent = parent +} + +// setIndex sets this element token's index within its parent's Child slice. +func (e *Element) setIndex(index int) { + e.index = index +} + +// addChild adds a child token to the element e. +func (e *Element) addChild(t Token) { + t.setParent(e) + t.setIndex(len(e.Child)) + e.Child = append(e.Child, t) +} + +// CreateAttr creates an attribute with the specified 'key' and 'value' and +// adds it to this element. If an attribute with same key already exists on +// this element, then its value is replaced. The key may include a namespace +// prefix followed by a colon. +func (e *Element) CreateAttr(key, value string) *Attr { + space, skey := spaceDecompose(key) + return e.createAttr(space, skey, value, e) +} + +// createAttr is a helper function that creates attributes. +func (e *Element) createAttr(space, key, value string, parent *Element) *Attr { + for i, a := range e.Attr { + if space == a.Space && key == a.Key { + e.Attr[i].Value = value + return &e.Attr[i] + } + } + a := Attr{ + Space: space, + Key: key, + Value: value, + element: parent, + } + e.Attr = append(e.Attr, a) + return &e.Attr[len(e.Attr)-1] +} + +// RemoveAttr removes the first attribute of this element whose key matches +// 'key'. It returns a copy of the removed attribute if a match is found. If +// no match is found, it returns nil. The key may include a namespace prefix +// followed by a colon. +func (e *Element) RemoveAttr(key string) *Attr { + space, skey := spaceDecompose(key) + for i, a := range e.Attr { + if space == a.Space && skey == a.Key { + e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...) + return &Attr{ + Space: a.Space, + Key: a.Key, + Value: a.Value, + element: nil, + } + } + } + return nil +} + +// SortAttrs sorts this element's attributes lexicographically by key. +func (e *Element) SortAttrs() { + sort.Sort(byAttr(e.Attr)) +} + +type byAttr []Attr + +func (a byAttr) Len() int { + return len(a) +} + +func (a byAttr) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a byAttr) Less(i, j int) bool { + sp := strings.Compare(a[i].Space, a[j].Space) + if sp == 0 { + return strings.Compare(a[i].Key, a[j].Key) < 0 + } + return sp < 0 +} + +// FullKey returns this attribute's complete key, including namespace prefix +// if present. +func (a *Attr) FullKey() string { + if a.Space == "" { + return a.Key + } + return a.Space + ":" + a.Key +} + +// Element returns a pointer to the element containing this attribute. +func (a *Attr) Element() *Element { + return a.element +} + +// NamespaceURI returns the XML namespace URI associated with this attribute. +// The function returns the empty string if the attribute is unprefixed or +// if the attribute is part of the XML default namespace. +func (a *Attr) NamespaceURI() string { + if a.Space == "" { + return "" + } + return a.element.findLocalNamespaceURI(a.Space) +} + +// WriteTo serializes the attribute to the writer. 
+func (a *Attr) WriteTo(w Writer, s *WriteSettings) {
+	w.WriteString(a.FullKey())
+	if s.AttrSingleQuote {
+		w.WriteString(`='`)
+	} else {
+		w.WriteString(`="`)
+	}
+	var m escapeMode
+	if s.CanonicalAttrVal {
+		m = escapeCanonicalAttr
+	} else {
+		m = escapeNormal
+	}
+	escapeString(w, a.Value, m)
+	if s.AttrSingleQuote {
+		w.WriteByte('\'')
+	} else {
+		w.WriteByte('"')
+	}
+}
+
+// NewText creates an unparented CharData token containing simple text data.
+func NewText(text string) *CharData {
+	return newCharData(text, 0, nil)
+}
+
+// NewCData creates an unparented XML character CDATA section with 'data' as
+// its content.
+func NewCData(data string) *CharData {
+	return newCharData(data, cdataFlag, nil)
+}
+
+// NewCharData creates an unparented CharData token containing simple text
+// data.
+//
+// Deprecated: NewCharData is deprecated. Instead, use NewText, which does the
+// same thing.
+func NewCharData(data string) *CharData {
+	return newCharData(data, 0, nil)
+}
+
+// newCharData creates a character data token and binds it to a parent
+// element. If parent is nil, the CharData token remains unbound.
+func newCharData(data string, flags charDataFlags, parent *Element) *CharData {
+	c := &CharData{
+		Data:   data,
+		parent: nil,
+		index:  -1,
+		flags:  flags,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateText creates a CharData token containing simple text data and adds it
+// to the end of this element's list of child tokens.
+func (e *Element) CreateText(text string) *CharData {
+	return newCharData(text, 0, e)
+}
+
+// CreateCData creates a CharData token containing a CDATA section with 'data'
+// as its content and adds it to the end of this element's list of child
+// tokens.
+func (e *Element) CreateCData(data string) *CharData {
+	return newCharData(data, cdataFlag, e)
+}
+
+// CreateCharData creates a CharData token containing simple text data and
+// adds it to the end of this element's list of child tokens.
+//
+// Deprecated: CreateCharData is deprecated. Instead, use CreateText, which
+// does the same thing.
+func (e *Element) CreateCharData(data string) *CharData {
+	return e.CreateText(data)
+}
+
+// SetData modifies the content of the CharData token. In the case of a
+// CharData token containing simple text, the simple text is modified. In the
+// case of a CharData token containing a CDATA section, the CDATA section's
+// content is modified.
+func (c *CharData) SetData(text string) {
+	c.Data = text
+	if isWhitespace(text) {
+		c.flags |= whitespaceFlag
+	} else {
+		c.flags &= ^whitespaceFlag
+	}
+}
+
+// IsCData returns true if this CharData token contains a CDATA section. It
+// returns false if the CharData token contains simple text.
+func (c *CharData) IsCData() bool {
+	return (c.flags & cdataFlag) != 0
+}
+
+// IsWhitespace returns true if this CharData token contains only whitespace.
+func (c *CharData) IsWhitespace() bool {
+	return (c.flags & whitespaceFlag) != 0
+}
+
+// Parent returns this CharData token's parent element, or nil if it has no
+// parent.
+func (c *CharData) Parent() *Element {
+	return c.parent
+}
+
+// Index returns the index of this CharData token within its parent element's
+// list of child tokens. If this CharData token has no parent, then the
+// function returns -1.
+func (c *CharData) Index() int {
+	return c.index
+}
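
To make the distinction between the two CharData forms concrete, here is a minimal usage sketch against the constructors defined in this file (the tag and text values are illustrative): CreateText stores data that is entity-escaped on output, while CreateCData emits its data verbatim inside a CDATA section.

	package main

	import (
		"os"

		"github.com/beevik/etree"
	)

	func main() {
		doc := etree.NewDocument()
		root := doc.CreateElement("msg")
		root.CreateText("1 < 2")  // serialized escaped: 1 &lt; 2
		root.CreateCData("1 < 2") // serialized verbatim inside <![CDATA[ ... ]]>
		doc.WriteTo(os.Stdout)    // <msg>1 &lt; 2<![CDATA[1 < 2]]></msg>
	}
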
+// WriteTo serializes character data to the writer.
+func (c *CharData) WriteTo(w Writer, s *WriteSettings) {
+	if c.IsCData() {
+		w.WriteString(`<![CDATA[`)
+		w.WriteString(c.Data)
+		w.WriteString(`]]>`)
+	} else {
+		var m escapeMode
+		if s.CanonicalText {
+			m = escapeCanonicalText
+		} else {
+			m = escapeNormal
+		}
+		escapeString(w, c.Data, m)
+	}
+}
+
+// dup duplicates the character data.
+func (c *CharData) dup(parent *Element) Token {
+	return &CharData{
+		Data:   c.Data,
+		flags:  c.flags,
+		parent: parent,
+		index:  c.index,
+	}
+}
+
+// setParent replaces the character data token's parent.
+func (c *CharData) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// setIndex sets the CharData token's index within its parent element's Child
+// slice.
+func (c *CharData) setIndex(index int) {
+	c.index = index
+}
+
+// NewComment creates an unparented comment token.
+func NewComment(comment string) *Comment {
+	return newComment(comment, nil)
+}
+
+// newComment creates a comment token and sets its parent element to 'parent'.
+func newComment(comment string, parent *Element) *Comment {
+	c := &Comment{
+		Data:   comment,
+		parent: nil,
+		index:  -1,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateComment creates a comment token using the specified 'comment' string
+// and adds it as the last child token of this element.
+func (e *Element) CreateComment(comment string) *Comment {
+	return newComment(comment, e)
+}
+
+// dup duplicates the comment.
+func (c *Comment) dup(parent *Element) Token {
+	return &Comment{
+		Data:   c.Data,
+		parent: parent,
+		index:  c.index,
+	}
+}
+
+// Parent returns the comment token's parent element, or nil if it has no
+// parent.
+func (c *Comment) Parent() *Element {
+	return c.parent
+}
+
+// Index returns the index of this Comment token within its parent element's
+// list of child tokens. If this Comment token has no parent, then the
+// function returns -1.
+func (c *Comment) Index() int {
+	return c.index
+}
+
+// WriteTo serializes the comment to the writer.
+func (c *Comment) WriteTo(w Writer, s *WriteSettings) {
+	w.WriteString("<!--")
+	w.WriteString(c.Data)
+	w.WriteString("-->")
+}
+
+// setParent replaces the comment token's parent.
+func (c *Comment) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// setIndex sets the Comment token's index within its parent element's Child
+// slice.
+func (c *Comment) setIndex(index int) {
+	c.index = index
+}
+
+// NewDirective creates an unparented XML directive token.
+func NewDirective(data string) *Directive {
+	return newDirective(data, nil)
+}
+
+// newDirective creates an XML directive and binds it to a parent element. If
+// parent is nil, the Directive remains unbound.
+func newDirective(data string, parent *Element) *Directive {
+	d := &Directive{
+		Data:   data,
+		parent: nil,
+		index:  -1,
+	}
+	if parent != nil {
+		parent.addChild(d)
+	}
+	return d
+}
+
+// CreateDirective creates an XML directive token with the specified 'data'
+// value and adds it as the last child token of this element.
+func (e *Element) CreateDirective(data string) *Directive {
+	return newDirective(data, e)
+}
+
+// dup duplicates the directive.
+func (d *Directive) dup(parent *Element) Token {
+	return &Directive{
+		Data:   d.Data,
+		parent: parent,
+		index:  d.index,
+	}
+}
+
+// Parent returns the directive token's parent element, or nil if it has no
+// parent.
+func (d *Directive) Parent() *Element {
+	return d.parent
+}
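
A short sketch of how the comment, directive, and processing-instruction tokens above serialize (values are illustrative; the output forms follow directly from the WriteTo methods in this file):

	package main

	import (
		"os"

		"github.com/beevik/etree"
	)

	func main() {
		doc := etree.NewDocument()
		doc.CreateProcInst("xml", `version="1.0"`) // <?xml version="1.0"?>
		doc.CreateDirective("DOCTYPE catalog")     // <!DOCTYPE catalog>
		root := doc.CreateElement("catalog")
		root.CreateComment("no entries yet")       // <!--no entries yet-->
		doc.WriteTo(os.Stdout)
	}
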
+// Index returns the index of this Directive token within its parent element's
+// list of child tokens. If this Directive token has no parent, then the
+// function returns -1.
+func (d *Directive) Index() int {
+	return d.index
+}
+
+// WriteTo serializes the XML directive to the writer.
+func (d *Directive) WriteTo(w Writer, s *WriteSettings) {
+	w.WriteString("<!")
+	w.WriteString(d.Data)
+	w.WriteString(">")
+}
+
+// setParent replaces the directive token's parent.
+func (d *Directive) setParent(parent *Element) {
+	d.parent = parent
+}
+
+// setIndex sets the Directive token's index within its parent element's Child
+// slice.
+func (d *Directive) setIndex(index int) {
+	d.index = index
+}
+
+// NewProcInst creates an unparented XML processing instruction.
+func NewProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, nil)
+}
+
+// newProcInst creates an XML processing instruction and binds it to a parent
+// element. If parent is nil, the ProcInst remains unbound.
+func newProcInst(target, inst string, parent *Element) *ProcInst {
+	p := &ProcInst{
+		Target: target,
+		Inst:   inst,
+		parent: nil,
+		index:  -1,
+	}
+	if parent != nil {
+		parent.addChild(p)
+	}
+	return p
+}
+
+// CreateProcInst creates an XML processing instruction token with the
+// specified 'target' and instruction 'inst'. It is then added as the last
+// child token of this element.
+func (e *Element) CreateProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, e)
+}
+
+// dup duplicates the procinst.
+func (p *ProcInst) dup(parent *Element) Token {
+	return &ProcInst{
+		Target: p.Target,
+		Inst:   p.Inst,
+		parent: parent,
+		index:  p.index,
+	}
+}
+
+// Parent returns the processing instruction token's parent element, or nil
+// if it has no parent.
+func (p *ProcInst) Parent() *Element {
+	return p.parent
+}
+
+// Index returns the index of this ProcInst token within its parent element's
+// list of child tokens. If this ProcInst token has no parent, then the
+// function returns -1.
+func (p *ProcInst) Index() int {
+	return p.index
+}
+
+// WriteTo serializes the processing instruction to the writer.
+func (p *ProcInst) WriteTo(w Writer, s *WriteSettings) {
+	w.WriteString("<?")
+	w.WriteString(p.Target)
+	if p.Inst != "" {
+		w.WriteByte(' ')
+		w.WriteString(p.Inst)
+	}
+	w.WriteString("?>")
+}
+
+// setParent replaces the processing instruction token's parent.
+func (p *ProcInst) setParent(parent *Element) {
+	p.parent = parent
+}
+
+// setIndex sets the processing instruction token's index within its parent
+// element's Child slice.
+func (p *ProcInst) setIndex(index int) {
+	p.index = index
+}
diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go
new file mode 100644
index 00000000..b31fd754
--- /dev/null
+++ b/vendor/github.com/beevik/etree/helpers.go
@@ -0,0 +1,394 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"io"
+	"strings"
+	"unicode/utf8"
+)
+
+// A simple stack
+type stack struct {
+	data []interface{}
+}
+
+func (s *stack) empty() bool {
+	return len(s.data) == 0
+}
+
+func (s *stack) push(value interface{}) {
+	s.data = append(s.data, value)
+}
+
+func (s *stack) pop() interface{} {
+	value := s.data[len(s.data)-1]
+	s.data[len(s.data)-1] = nil
+	s.data = s.data[:len(s.data)-1]
+	return value
+}
+
+func (s *stack) peek() interface{} {
+	return s.data[len(s.data)-1]
+}
+
+// A fifo is a simple first-in-first-out queue.
+type fifo struct { + data []interface{} + head, tail int +} + +func (f *fifo) add(value interface{}) { + if f.len()+1 >= len(f.data) { + f.grow() + } + f.data[f.tail] = value + if f.tail++; f.tail == len(f.data) { + f.tail = 0 + } +} + +func (f *fifo) remove() interface{} { + value := f.data[f.head] + f.data[f.head] = nil + if f.head++; f.head == len(f.data) { + f.head = 0 + } + return value +} + +func (f *fifo) len() int { + if f.tail >= f.head { + return f.tail - f.head + } + return len(f.data) - f.head + f.tail +} + +func (f *fifo) grow() { + c := len(f.data) * 2 + if c == 0 { + c = 4 + } + buf, count := make([]interface{}, c), f.len() + if f.tail >= f.head { + copy(buf[0:count], f.data[f.head:f.tail]) + } else { + hindex := len(f.data) - f.head + copy(buf[0:hindex], f.data[f.head:]) + copy(buf[hindex:count], f.data[:f.tail]) + } + f.data, f.head, f.tail = buf, 0, count +} + +// xmlReader provides the interface by which an XML byte stream is +// processed and decoded. +type xmlReader interface { + Bytes() int64 + Read(p []byte) (n int, err error) +} + +// xmlSimpleReader implements a proxy reader that counts the number of +// bytes read from its encapsulated reader. +type xmlSimpleReader struct { + r io.Reader + bytes int64 +} + +func newXmlSimpleReader(r io.Reader) xmlReader { + return &xmlSimpleReader{r, 0} +} + +func (xr *xmlSimpleReader) Bytes() int64 { + return xr.bytes +} + +func (xr *xmlSimpleReader) Read(p []byte) (n int, err error) { + n, err = xr.r.Read(p) + xr.bytes += int64(n) + return n, err +} + +// xmlPeekReader implements a proxy reader that counts the number of +// bytes read from its encapsulated reader. It also allows the caller to +// "peek" at the previous portions of the buffer after they have been +// parsed. +type xmlPeekReader struct { + r io.Reader + bytes int64 // total bytes read by the Read function + buf []byte // internal read buffer + bufSize int // total bytes used in the read buffer + bufOffset int64 // total bytes read when buf was last filled + window []byte // current read buffer window + peekBuf []byte // buffer used to store data to be peeked at later + peekOffset int64 // total read offset of the start of the peek buffer +} + +func newXmlPeekReader(r io.Reader) *xmlPeekReader { + buf := make([]byte, 4096) + return &xmlPeekReader{ + r: r, + bytes: 0, + buf: buf, + bufSize: 0, + bufOffset: 0, + window: buf[0:0], + peekBuf: make([]byte, 0), + peekOffset: -1, + } +} + +func (xr *xmlPeekReader) Bytes() int64 { + return xr.bytes +} + +func (xr *xmlPeekReader) Read(p []byte) (n int, err error) { + if len(xr.window) == 0 { + err = xr.fill() + if err != nil { + return 0, err + } + if len(xr.window) == 0 { + return 0, nil + } + } + + if len(xr.window) < len(p) { + n = len(xr.window) + } else { + n = len(p) + } + + copy(p, xr.window) + xr.window = xr.window[n:] + xr.bytes += int64(n) + + return n, err +} + +func (xr *xmlPeekReader) PeekPrepare(offset int64, maxLen int) { + if maxLen > cap(xr.peekBuf) { + xr.peekBuf = make([]byte, 0, maxLen) + } + xr.peekBuf = xr.peekBuf[0:0] + xr.peekOffset = offset + xr.updatePeekBuf() +} + +func (xr *xmlPeekReader) PeekFinalize() []byte { + xr.updatePeekBuf() + return xr.peekBuf +} + +func (xr *xmlPeekReader) fill() error { + xr.bufOffset = xr.bytes + xr.bufSize = 0 + n, err := xr.r.Read(xr.buf) + if err != nil { + xr.window, xr.bufSize = xr.buf[0:0], 0 + return err + } + xr.window, xr.bufSize = xr.buf[:n], n + xr.updatePeekBuf() + return nil +} + +func (xr *xmlPeekReader) updatePeekBuf() { + peekRemain := cap(xr.peekBuf) 
- len(xr.peekBuf) + if xr.peekOffset >= 0 && peekRemain > 0 { + rangeMin := xr.peekOffset + rangeMax := xr.peekOffset + int64(cap(xr.peekBuf)) + bufMin := xr.bufOffset + bufMax := xr.bufOffset + int64(xr.bufSize) + if rangeMin < bufMin { + rangeMin = bufMin + } + if rangeMax > bufMax { + rangeMax = bufMax + } + if rangeMax > rangeMin { + rangeMin -= xr.bufOffset + rangeMax -= xr.bufOffset + if int(rangeMax-rangeMin) > peekRemain { + rangeMax = rangeMin + int64(peekRemain) + } + xr.peekBuf = append(xr.peekBuf, xr.buf[rangeMin:rangeMax]...) + } + } +} + +// xmlWriter implements a proxy writer that counts the number of +// bytes written by its encapsulated writer. +type xmlWriter struct { + w io.Writer + bytes int64 +} + +func newXmlWriter(w io.Writer) *xmlWriter { + return &xmlWriter{w: w} +} + +func (xw *xmlWriter) Write(p []byte) (n int, err error) { + n, err = xw.w.Write(p) + xw.bytes += int64(n) + return n, err +} + +// isWhitespace returns true if the byte slice contains only +// whitespace characters. +func isWhitespace(s string) bool { + for i := 0; i < len(s); i++ { + if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' { + return false + } + } + return true +} + +// spaceMatch returns true if namespace a is the empty string +// or if namespace a equals namespace b. +func spaceMatch(a, b string) bool { + switch { + case a == "": + return true + default: + return a == b + } +} + +// spaceDecompose breaks a namespace:tag identifier at the ':' +// and returns the two parts. +func spaceDecompose(str string) (space, key string) { + colon := strings.IndexByte(str, ':') + if colon == -1 { + return "", str + } + return str[:colon], str[colon+1:] +} + +// Strings used by indentCRLF and indentLF +const ( + indentSpaces = "\r\n " + indentTabs = "\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t" +) + +// indentCRLF returns a CRLF newline followed by n copies of the first +// non-CRLF character in the source string. +func indentCRLF(n int, source string) string { + switch { + case n < 0: + return source[:2] + case n < len(source)-1: + return source[:n+2] + default: + return source + strings.Repeat(source[2:3], n-len(source)+2) + } +} + +// indentLF returns a LF newline followed by n copies of the first non-LF +// character in the source string. +func indentLF(n int, source string) string { + switch { + case n < 0: + return source[1:2] + case n < len(source)-1: + return source[1 : n+2] + default: + return source[1:] + strings.Repeat(source[2:3], n-len(source)+2) + } +} + +// nextIndex returns the index of the next occurrence of sep in s, +// starting from offset. It returns -1 if the sep string is not found. +func nextIndex(s, sep string, offset int) int { + switch i := strings.Index(s[offset:], sep); i { + case -1: + return -1 + default: + return offset + i + } +} + +// isInteger returns true if the string s contains an integer. +func isInteger(s string) bool { + for i := 0; i < len(s); i++ { + if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') { + return false + } + } + return true +} + +type escapeMode byte + +const ( + escapeNormal escapeMode = iota + escapeCanonicalText + escapeCanonicalAttr +) + +// escapeString writes an escaped version of a string to the writer. 
+func escapeString(w Writer, s string, m escapeMode) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '&':
+			esc = []byte("&amp;")
+		case '<':
+			esc = []byte("&lt;")
+		case '>':
+			if m == escapeCanonicalAttr {
+				continue
+			}
+			esc = []byte("&gt;")
+		case '\'':
+			if m != escapeNormal {
+				continue
+			}
+			esc = []byte("&apos;")
+		case '"':
+			if m == escapeCanonicalText {
+				continue
+			}
+			esc = []byte("&quot;")
+		case '\t':
+			if m != escapeCanonicalAttr {
+				continue
+			}
+			esc = []byte("&#x9;")
+		case '\n':
+			if m != escapeCanonicalAttr {
+				continue
+			}
+			esc = []byte("&#xA;")
+		case '\r':
+			if m == escapeNormal {
+				continue
+			}
+			esc = []byte("&#xD;")
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = []byte("\uFFFD")
+				break
+			}
+			continue
+		}
+		w.WriteString(s[last : i-width])
+		w.Write(esc)
+		last = i
+	}
+	w.WriteString(s[last:])
+}
+
+func isInCharacterRange(r rune) bool {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
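
The escape modes above are selected through the package's exported write settings. A minimal sketch, assuming the exported Document and WriteSettings API of this package; the output claims follow directly from the switch in escapeString:

	package main

	import (
		"os"

		"github.com/beevik/etree"
	)

	func main() {
		doc := etree.NewDocument()
		doc.CreateElement("m").CreateText("say \"hi\"\r")

		// Default mode (escapeNormal): '"' becomes &quot; and '\r' is
		// written through literally.
		doc.WriteTo(os.Stdout)

		// Canonical text mode: '"' is left as-is and '\r' becomes &#xD;.
		doc.WriteSettings.CanonicalText = true
		doc.WriteTo(os.Stdout)
	}
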
diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go
new file mode 100644
index 00000000..a6d67ace
--- /dev/null
+++ b/vendor/github.com/beevik/etree/path.go
@@ -0,0 +1,586 @@
+// Copyright 2015-2019 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"strconv"
+	"strings"
+)
+
+/*
+A Path is a string that represents a search path through an etree starting
+from the document root or an arbitrary element. Paths are used with the
+Element object's Find* methods to locate and return desired elements.
+
+A Path consists of a series of slash-separated "selectors", each of which may
+be modified by one or more bracket-enclosed "filters". Selectors are used to
+traverse the etree from element to element, while filters are used to narrow
+the list of candidate elements at each node.
+
+Although etree Path strings are structurally and behaviorally similar to XPath
+strings (https://www.w3.org/TR/1999/REC-xpath-19991116/), they have a more
+limited set of selectors and filtering options.
+
+The following selectors are supported by etree paths:
+
+    .               Select the current element.
+    ..              Select the parent of the current element.
+    *               Select all child elements of the current element.
+    /               Select the root element when used at the start of a path.
+    //              Select all descendants of the current element.
+    tag             Select all child elements with a name matching the tag.
+
+The following basic filters are supported:
+
+    [@attrib]       Keep elements with an attribute named attrib.
+    [@attrib='val'] Keep elements with an attribute named attrib and value matching val.
+    [tag]           Keep elements with a child element named tag.
+    [tag='val']     Keep elements with a child element named tag and text matching val.
+    [n]             Keep the n-th element, where n is a numeric index starting from 1.
+
+The following function-based filters are supported:
+
+    [text()]                    Keep elements with non-empty text.
+    [text()='val']              Keep elements whose text matches val.
+    [local-name()='val']        Keep elements whose un-prefixed tag matches val.
+    [name()='val']              Keep elements whose full tag exactly matches val.
+    [namespace-prefix()]        Keep elements with non-empty namespace prefixes.
+    [namespace-prefix()='val']  Keep elements whose namespace prefix matches val.
+    [namespace-uri()]           Keep elements with non-empty namespace URIs.
+    [namespace-uri()='val']     Keep elements whose namespace URI matches val.
+
+Below are some examples of etree path strings.
+
+Select the bookstore child element of the root element:
+
+    /bookstore
+
+Beginning from the root element, select the title elements of all descendant
+book elements having a 'category' attribute of 'WEB':
+
+    //book[@category='WEB']/title
+
+Beginning from the current element, select the first descendant book element
+with a title child element containing the text 'Great Expectations':
+
+    .//book[title='Great Expectations'][1]
+
+Beginning from the current element, select all child elements of book elements
+with an attribute 'language' set to 'english':
+
+    ./book/*[@language='english']
+
+Beginning from the current element, select all child elements of book elements
+containing the text 'special':
+
+    ./book/*[text()='special']
+
+Beginning from the current element, select all descendant book elements whose
+title child element has a 'language' attribute of 'french':
+
+    .//book/title[@language='french']/..
+
+Beginning from the current element, select all descendant book elements
+belonging to the http://www.w3.org/TR/html4/ namespace:
+
+    .//book[namespace-uri()='http://www.w3.org/TR/html4/']
+*/
+type Path struct {
+	segments []segment
+}
+
+// ErrPath is returned by path functions when an invalid etree path is provided.
+type ErrPath string
+
+// Error returns the string describing a path error.
+func (err ErrPath) Error() string {
+	return "etree: " + string(err)
+}
+
+// CompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree.
+func CompilePath(path string) (Path, error) {
+	var comp compiler
+	segments := comp.parsePath(path)
+	if comp.err != ErrPath("") {
+		return Path{nil}, comp.err
+	}
+	return Path{segments}, nil
+}
+
+// MustCompilePath creates an optimized version of an XPath-like string that
+// can be used to query elements in an element tree. Panics if an error
+// occurs. Use this function to create Paths when you know the path is
+// valid (i.e., if it's hard-coded).
+func MustCompilePath(path string) Path {
+	p, err := CompilePath(path)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// A segment is a portion of a path between "/" characters.
+// It contains one selector and zero or more [filters].
+type segment struct {
+	sel     selector
+	filters []filter
+}
+
+func (seg *segment) apply(e *Element, p *pather) {
+	seg.sel.apply(e, p)
+	for _, f := range seg.filters {
+		f.apply(p)
+	}
+}
+
+// A selector selects XML elements for consideration by the
+// path traversal.
+type selector interface {
+	apply(e *Element, p *pather)
+}
+
+// A filter pares down a list of candidate XML elements based
+// on a path filter in [brackets].
+type filter interface {
+	apply(p *pather)
+}
+
+// A pather is a helper object that traverses an element tree using
+// a Path object. It collects and deduplicates all elements matching
+// the path query.
+type pather struct {
+	queue      fifo
+	results    []*Element
+	inResults  map[*Element]bool
+	candidates []*Element
+	scratch    []*Element // used by filters
+}
+
+// A node represents an element and the remaining path segments that
+// should be applied against it by the pather.
+type node struct { + e *Element + segments []segment +} + +func newPather() *pather { + return &pather{ + results: make([]*Element, 0), + inResults: make(map[*Element]bool), + candidates: make([]*Element, 0), + scratch: make([]*Element, 0), + } +} + +// traverse follows the path from the element e, collecting +// and then returning all elements that match the path's selectors +// and filters. +func (p *pather) traverse(e *Element, path Path) []*Element { + for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { + p.eval(p.queue.remove().(node)) + } + return p.results +} + +// eval evaluates the current path node by applying the remaining +// path's selector rules against the node's element. +func (p *pather) eval(n node) { + p.candidates = p.candidates[0:0] + seg, remain := n.segments[0], n.segments[1:] + seg.apply(n.e, p) + + if len(remain) == 0 { + for _, c := range p.candidates { + if in := p.inResults[c]; !in { + p.inResults[c] = true + p.results = append(p.results, c) + } + } + } else { + for _, c := range p.candidates { + p.queue.add(node{c, remain}) + } + } +} + +// A compiler generates a compiled path from a path string. +type compiler struct { + err ErrPath +} + +// parsePath parses an XPath-like string describing a path +// through an element tree and returns a slice of segment +// descriptors. +func (c *compiler) parsePath(path string) []segment { + // If path ends with //, fix it + if strings.HasSuffix(path, "//") { + path += "*" + } + + var segments []segment + + // Check for an absolute path + if strings.HasPrefix(path, "/") { + segments = append(segments, segment{new(selectRoot), []filter{}}) + path = path[1:] + } + + // Split path into segments + for _, s := range splitPath(path) { + segments = append(segments, c.parseSegment(s)) + if c.err != ErrPath("") { + break + } + } + return segments +} + +func splitPath(path string) []string { + var pieces []string + start := 0 + inquote := false + for i := 0; i+1 <= len(path); i++ { + if path[i] == '\'' { + inquote = !inquote + } else if path[i] == '/' && !inquote { + pieces = append(pieces, path[start:i]) + start = i + 1 + } + } + return append(pieces, path[start:]) +} + +// parseSegment parses a path segment between / characters. +func (c *compiler) parseSegment(path string) segment { + pieces := strings.Split(path, "[") + seg := segment{ + sel: c.parseSelector(pieces[0]), + filters: []filter{}, + } + for i := 1; i < len(pieces); i++ { + fpath := pieces[i] + if len(fpath) == 0 || fpath[len(fpath)-1] != ']' { + c.err = ErrPath("path has invalid filter [brackets].") + break + } + seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) + } + return seg +} + +// parseSelector parses a selector at the start of a path segment. +func (c *compiler) parseSelector(path string) selector { + switch path { + case ".": + return new(selectSelf) + case "..": + return new(selectParent) + case "*": + return new(selectChildren) + case "": + return new(selectDescendants) + default: + return newSelectChildrenByTag(path) + } +} + +var fnTable = map[string]func(e *Element) string{ + "local-name": (*Element).name, + "name": (*Element).FullTag, + "namespace-prefix": (*Element).namespacePrefix, + "namespace-uri": (*Element).NamespaceURI, + "text": (*Element).Text, +} + +// parseFilter parses a path filter contained within [brackets]. 
+func (c *compiler) parseFilter(path string) filter { + if len(path) == 0 { + c.err = ErrPath("path contains an empty filter expression.") + return nil + } + + // Filter contains [@attr='val'], [fn()='val'], or [tag='val']? + eqindex := strings.Index(path, "='") + if eqindex >= 0 { + rindex := nextIndex(path, "'", eqindex+2) + if rindex != len(path)-1 { + c.err = ErrPath("path has mismatched filter quotes.") + return nil + } + + key := path[:eqindex] + value := path[eqindex+2 : rindex] + + switch { + case key[0] == '@': + return newFilterAttrVal(key[1:], value) + case strings.HasSuffix(key, "()"): + name := key[:len(key)-2] + if fn, ok := fnTable[name]; ok { + return newFilterFuncVal(fn, value) + } + c.err = ErrPath("path has unknown function " + name) + return nil + default: + return newFilterChildText(key, value) + } + } + + // Filter contains [@attr], [N], [tag] or [fn()] + switch { + case path[0] == '@': + return newFilterAttr(path[1:]) + case strings.HasSuffix(path, "()"): + name := path[:len(path)-2] + if fn, ok := fnTable[name]; ok { + return newFilterFunc(fn) + } + c.err = ErrPath("path has unknown function " + name) + return nil + case isInteger(path): + pos, _ := strconv.Atoi(path) + switch { + case pos > 0: + return newFilterPos(pos - 1) + default: + return newFilterPos(pos) + } + default: + return newFilterChild(path) + } +} + +// selectSelf selects the current element into the candidate list. +type selectSelf struct{} + +func (s *selectSelf) apply(e *Element, p *pather) { + p.candidates = append(p.candidates, e) +} + +// selectRoot selects the element's root node. +type selectRoot struct{} + +func (s *selectRoot) apply(e *Element, p *pather) { + root := e + for root.parent != nil { + root = root.parent + } + p.candidates = append(p.candidates, root) +} + +// selectParent selects the element's parent into the candidate list. +type selectParent struct{} + +func (s *selectParent) apply(e *Element, p *pather) { + if e.parent != nil { + p.candidates = append(p.candidates, e.parent) + } +} + +// selectChildren selects the element's child elements into the +// candidate list. +type selectChildren struct{} + +func (s *selectChildren) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + p.candidates = append(p.candidates, c) + } + } +} + +// selectDescendants selects all descendant child elements +// of the element into the candidate list. +type selectDescendants struct{} + +func (s *selectDescendants) apply(e *Element, p *pather) { + var queue fifo + for queue.add(e); queue.len() > 0; { + e := queue.remove().(*Element) + p.candidates = append(p.candidates, e) + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + queue.add(c) + } + } + } +} + +// selectChildrenByTag selects into the candidate list all child +// elements of the element having the specified tag. +type selectChildrenByTag struct { + space, tag string +} + +func newSelectChildrenByTag(path string) *selectChildrenByTag { + s, l := spaceDecompose(path) + return &selectChildrenByTag{s, l} +} + +func (s *selectChildrenByTag) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { + p.candidates = append(p.candidates, c) + } + } +} + +// filterPos filters the candidate list, keeping only the +// candidate at the specified index. 
+type filterPos struct { + index int +} + +func newFilterPos(pos int) *filterPos { + return &filterPos{pos} +} + +func (f *filterPos) apply(p *pather) { + if f.index >= 0 { + if f.index < len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[f.index]) + } + } else { + if -f.index <= len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttr filters the candidate list for elements having +// the specified attribute. +type filterAttr struct { + space, key string +} + +func newFilterAttr(str string) *filterAttr { + s, l := spaceDecompose(str) + return &filterAttr{s, l} +} + +func (f *filterAttr) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttrVal filters the candidate list for elements having +// the specified attribute with the specified value. +type filterAttrVal struct { + space, key, val string +} + +func newFilterAttrVal(str, value string) *filterAttrVal { + s, l := spaceDecompose(str) + return &filterAttrVal{s, l, value} +} + +func (f *filterAttrVal) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterFunc filters the candidate list for elements satisfying a custom +// boolean function. +type filterFunc struct { + fn func(e *Element) string +} + +func newFilterFunc(fn func(e *Element) string) *filterFunc { + return &filterFunc{fn} +} + +func (f *filterFunc) apply(p *pather) { + for _, c := range p.candidates { + if f.fn(c) != "" { + p.scratch = append(p.scratch, c) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterFuncVal filters the candidate list for elements containing a value +// matching the result of a custom function. +type filterFuncVal struct { + fn func(e *Element) string + val string +} + +func newFilterFuncVal(fn func(e *Element) string, value string) *filterFuncVal { + return &filterFuncVal{fn, value} +} + +func (f *filterFuncVal) apply(p *pather) { + for _, c := range p.candidates { + if f.fn(c) == f.val { + p.scratch = append(p.scratch, c) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChild filters the candidate list for elements having +// a child element with the specified tag. +type filterChild struct { + space, tag string +} + +func newFilterChild(str string) *filterChild { + s, l := spaceDecompose(str) + return &filterChild{s, l} +} + +func (f *filterChild) apply(p *pather) { + for _, c := range p.candidates { + for _, cc := range c.Child { + if cc, ok := cc.(*Element); ok && + spaceMatch(f.space, cc.Space) && + f.tag == cc.Tag { + p.scratch = append(p.scratch, c) + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChildText filters the candidate list for elements having +// a child element with the specified tag and text. 
+type filterChildText struct {
+	space, tag, text string
+}
+
+func newFilterChildText(str, text string) *filterChildText {
+	s, l := spaceDecompose(str)
+	return &filterChildText{s, l, text}
+}
+
+func (f *filterChildText) apply(p *pather) {
+	for _, c := range p.candidates {
+		for _, cc := range c.Child {
+			if cc, ok := cc.(*Element); ok &&
+				spaceMatch(f.space, cc.Space) &&
+				f.tag == cc.Tag &&
+				f.text == cc.Text() {
+				p.scratch = append(p.scratch, c)
+			}
+		}
+	}
+	p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/BUILD b/vendor/github.com/coreos/go-oidc/v3/oidc/BUILD
index 5f539bf3..a44ce7b2 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/BUILD
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/BUILD
@@ -12,7 +12,7 @@ go_library(
     importpath = "github.com/coreos/go-oidc/v3/oidc",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2",
+        "//vendor/github.com/go-jose/go-jose/v3:go-jose",
         "@org_golang_x_oauth2//:oauth2",
     ],
 )
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
index 8afa895c..b7bd0927 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
@@ -13,4 +13,5 @@ const (
 	PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
 	PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
 	PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+	EdDSA = "EdDSA" // Ed25519 using SHA-512
 )
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
index a272b7ab..539933b3 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
@@ -2,6 +2,10 @@ package oidc
 
 import (
 	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -9,9 +13,39 @@
 	"sync"
 	"time"
 
-	jose "gopkg.in/square/go-jose.v2"
+	jose "github.com/go-jose/go-jose/v3"
 )
 
+// StaticKeySet is a verifier that validates JWT against a static set of public keys.
+type StaticKeySet struct {
+	// PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey,
+	// *ecdsa.PublicKey, and ed25519.PublicKey.
+	PublicKeys []crypto.PublicKey
+}
+
+// VerifySignature compares the signature against a static set of public keys.
+func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+	jws, err := jose.ParseSigned(jwt)
+	if err != nil {
+		return nil, fmt.Errorf("parsing jwt: %v", err)
+	}
+	for _, pub := range s.PublicKeys {
+		switch pub.(type) {
+		case *rsa.PublicKey:
+		case *ecdsa.PublicKey:
+		case ed25519.PublicKey:
+		default:
+			return nil, fmt.Errorf("invalid public key type provided: %T", pub)
+		}
+		payload, err := jws.Verify(pub)
+		if err != nil {
+			continue
+		}
+		return payload, nil
+	}
+	return nil, fmt.Errorf("no public keys able to verify jwt")
+}
+
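
A hedged usage sketch for the StaticKeySet added above: it can back an IDTokenVerifier when keys are delivered out-of-band. The issuer URL, client ID, and key slice below are illustrative placeholders.

	package example

	import (
		"context"
		"crypto"

		"github.com/coreos/go-oidc/v3/oidc"
	)

	// verifyWithStaticKeys checks rawIDToken against keys supplied
	// out-of-band instead of a remote jwks_uri.
	func verifyWithStaticKeys(ctx context.Context, rawIDToken string, keys []crypto.PublicKey) (*oidc.IDToken, error) {
		keySet := &oidc.StaticKeySet{PublicKeys: keys}
		verifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{ClientID: "example-app"})
		return verifier.Verify(ctx, rawIDToken)
	}
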
 // NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
 // GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
 // used by NewProvider using the URLs returned by OpenID Connect discovery, but is
@@ -28,7 +62,7 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time)
 	if now == nil {
 		now = time.Now
 	}
-	return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
+	return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
 }
 
 // RemoteKeySet is a KeySet implementation that validates JSON web tokens against
@@ -81,15 +115,23 @@ func (i *inflight) result() ([]jose.JSONWebKey, error) {
 	return i.keys, i.err
 }
 
+// parsedJWTKey is a context key that allows common setups to avoid parsing the
+// JWT twice. It holds a *jose.JSONWebSignature value.
+var parsedJWTKey contextKey
+
 // VerifySignature validates a payload against a signature from the jwks_uri.
 //
 // Users MUST NOT call this method directly and should use an IDTokenVerifier
 // instead. This method skips critical validations such as 'alg' values and is
 // only exported to implement the KeySet interface.
 func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
-	jws, err := jose.ParseSigned(jwt)
-	if err != nil {
-		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
+	if !ok {
+		var err error
+		jws, err = jose.ParseSigned(jwt)
+		if err != nil {
+			return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+		}
 	}
 	return r.verify(ctx, jws)
 }
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
index 3e1d80e0..b159d1cc 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -14,6 +14,7 @@ import (
 	"mime"
 	"net/http"
 	"strings"
+	"sync"
 	"time"
 
 	"golang.org/x/oauth2"
@@ -48,39 +49,34 @@ var issuerURLKey contextKey
 // This method sets the same context key used by the golang.org/x/oauth2 package,
 // so the returned context works for that package too.
 //
-//    myClient := &http.Client{}
-//    ctx := oidc.ClientContext(parentContext, myClient)
-//
-//    // This will use the custom client
-//    provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
+//	myClient := &http.Client{}
+//	ctx := oidc.ClientContext(parentContext, myClient)
 //
+//	// This will use the custom client
+//	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
 func ClientContext(ctx context.Context, client *http.Client) context.Context {
 	return context.WithValue(ctx, oauth2.HTTPClient, client)
 }
 
-// cloneContext copies a context's bag-of-values into a new context that isn't
-// associated with its cancellation. This is used to initialize remote keys sets
-// which run in the background and aren't associated with the initial context.
-func cloneContext(ctx context.Context) context.Context {
-	cp := context.Background()
+func getClient(ctx context.Context) *http.Client {
 	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
-		cp = ClientContext(cp, c)
+		return c
 	}
-	return cp
+	return nil
 }
 
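Because getClient recovers the client stored by ClientContext, a caller-supplied HTTP client now reaches both discovery and the provider's lazily built common key set. A minimal sketch with an illustrative timeout and issuer:

	package example

	import (
		"context"
		"net/http"
		"time"

		"github.com/coreos/go-oidc/v3/oidc"
	)

	// providerWithCustomClient performs discovery with a caller-owned
	// HTTP client; the same client is reused for later JWKS fetches.
	func providerWithCustomClient(ctx context.Context) (*oidc.Provider, error) {
		httpClient := &http.Client{Timeout: 10 * time.Second}
		ctx = oidc.ClientContext(ctx, httpClient)
		return oidc.NewProvider(ctx, "https://accounts.example.com")
	}
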
 //
-//    discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
-//    issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
+//	discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
+//	issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
 //
-//    ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
+//	ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
 //
-//    // Provider will be discovered with the discoveryBaseURL, but use issuerURL
-//    // for future issuer validation.
-//    provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
+//	// Provider will be discovered with the discoveryBaseURL, but use issuerURL
+//	// for future issuer validation.
+//	provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
 //
 // This is insecure because validating the correct issuer is critical for multi-tenant
 // providers. Any overrides here MUST be carefully reviewed.
@@ -90,7 +86,7 @@ func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Con
 
 func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
 	client := http.DefaultClient
-	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+	if c := getClient(ctx); c != nil {
 		client = c
 	}
 	return client.Do(req.WithContext(ctx))
@@ -102,12 +98,33 @@ type Provider struct {
 	authURL      string
 	tokenURL     string
 	userInfoURL  string
+	jwksURL      string
 	algorithms   []string
 
 	// Raw claims returned by the server.
 	rawClaims []byte
 
-	remoteKeySet KeySet
+	// Guards all of the following fields.
+	mu sync.Mutex
+	// HTTP client specified from the initial NewProvider request. This is used
+	// when creating the common key set.
+	client *http.Client
+	// A key set that uses context.Background() and is shared between all code paths
+	// that don't have a convenient way of supplying a unique context.
+	commonRemoteKeySet KeySet
+}
+
+func (p *Provider) remoteKeySet() KeySet {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.commonRemoteKeySet == nil {
+		ctx := context.Background()
+		if p.client != nil {
+			ctx = ClientContext(ctx, p.client)
+		}
+		p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL)
+	}
+	return p.commonRemoteKeySet
 }
 
 type providerJSON struct {
@@ -132,6 +149,50 @@ var supportedAlgorithms = map[string]bool{
 	PS256: true,
 	PS384: true,
 	PS512: true,
+	EdDSA: true,
+}
+
+// ProviderConfig allows creating providers when discovery isn't supported. It's
+// generally easier to use NewProvider directly.
+type ProviderConfig struct {
+	// IssuerURL is the identity of the provider, and the string it uses to sign
+	// ID tokens with. For example "https://accounts.google.com". This value MUST
+	// match ID tokens exactly.
+	IssuerURL string
+	// AuthURL is the endpoint used by the provider to support the OAuth 2.0
+	// authorization endpoint.
+	AuthURL string
+	// TokenURL is the endpoint used by the provider to support the OAuth 2.0
+	// token endpoint.
+	TokenURL string
+	// UserInfoURL is the endpoint used by the provider to support the OpenID
+	// Connect UserInfo flow.
+	//
+	// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
+	UserInfoURL string
+	// JWKSURL is the endpoint used by the provider to advertise public keys to
+	// verify issued ID tokens. This endpoint is polled as new keys are made
+	// available.
+	JWKSURL string
+
+	// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
+	// ID tokens.
If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. + Algorithms []string +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. +func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: p.Algorithms, + client: getClient(ctx), + } } // NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. @@ -179,26 +240,27 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) { } } return &Provider{ - issuer: issuerURL, - authURL: p.AuthURL, - tokenURL: p.TokenURL, - userInfoURL: p.UserInfoURL, - algorithms: algs, - rawClaims: body, - remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL), + issuer: issuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: algs, + rawClaims: body, + client: getClient(ctx), }, nil } // Claims unmarshals raw fields returned by the server during discovery. // -// var claims struct { -// ScopesSupported []string `json:"scopes_supported"` -// ClaimsSupported []string `json:"claims_supported"` -// } +// var claims struct { +// ScopesSupported []string `json:"scopes_supported"` +// ClaimsSupported []string `json:"claims_supported"` +// } // -// if err := provider.Claims(&claims); err != nil { -// // handle unmarshaling error -// } +// if err := provider.Claims(&claims); err != nil { +// // handle unmarshaling error +// } // // For a list of fields defined by the OpenID Connect spec see: // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata @@ -214,6 +276,12 @@ func (p *Provider) Endpoint() oauth2.Endpoint { return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL} } +// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given +// provider. +func (p *Provider) UserInfoEndpoint() string { + return p.userInfoURL +} + // UserInfo represents the OpenID Connect userinfo claims. type UserInfo struct { Subject string `json:"sub"` @@ -275,7 +343,7 @@ func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) ct := resp.Header.Get("Content-Type") mediaType, _, parseErr := mime.ParseMediaType(ct) if parseErr == nil && mediaType == "application/jwt" { - payload, err := p.remoteKeySet.VerifySignature(ctx, string(body)) + payload, err := p.remoteKeySet().VerifySignature(ctx, string(body)) if err != nil { return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err) } @@ -349,18 +417,17 @@ type IDToken struct { // Claims unmarshals the raw JSON payload of the ID Token into a provided struct. 
// -// idToken, err := idTokenVerifier.Verify(rawIDToken) -// if err != nil { -// // handle error -// } -// var claims struct { -// Email string `json:"email"` -// EmailVerified bool `json:"email_verified"` -// } -// if err := idToken.Claims(&claims); err != nil { -// // handle error -// } -// +// idToken, err := idTokenVerifier.Verify(rawIDToken) +// if err != nil { +// // handle error +// } +// var claims struct { +// Email string `json:"email"` +// EmailVerified bool `json:"email_verified"` +// } +// if err := idToken.Claims(&claims); err != nil { +// // handle error +// } func (i *IDToken) Claims(v interface{}) error { if i.claims == nil { return errors.New("oidc: claims not set") @@ -382,7 +449,7 @@ func (i *IDToken) VerifyAccessToken(accessToken string) error { h = sha256.New() case RS384, ES384, PS384: h = sha512.New384() - case RS512, ES512, PS512: + case RS512, ES512, PS512, EdDSA: h = sha512.New() default: return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm) diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go index dc6b56df..3e5ffbc7 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -12,8 +12,8 @@ import ( "strings" "time" + jose "github.com/go-jose/go-jose/v3" "golang.org/x/oauth2" - jose "gopkg.in/square/go-jose.v2" ) const ( @@ -21,6 +21,18 @@ issuerGoogleAccountsNoScheme = "accounts.google.com" ) +// TokenExpiredError indicates that Verify failed because the token was expired. This +// error does NOT indicate that the token is not also invalid for other reasons. Other +// checks might have failed if the expiration check had not failed. +type TokenExpiredError struct { + // Expiry is the time when the token expired. + Expiry time.Time +} + +func (e *TokenExpiredError) Error() string { + return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry) +} + // KeySet is a set of public JSON Web Keys that can be used to validate the signature // of JSON web tokens. This is expected to be backed by a remote key set through // provider metadata discovery or an in-memory set of keys delivered out-of-band. @@ -52,19 +64,13 @@ type IDTokenVerifier struct { // This constructor can be used to create a verifier directly using the issuer URL and // JSON Web Key Set URL without using discovery: // -// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs") -// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) +// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs") +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) // -// Since KeySet is an interface, this constructor can also be used to supply custom -// public key sources. For example, if a user wanted to supply public keys out-of-band -// and hold them statically in-memory: -// -// // Custom KeySet implementation. -// keySet := newStatisKeySet(publicKeys...) -// -// // Verifier uses the custom KeySet implementation. -// verifier := oidc.NewVerifier("https://auth.example.com", keySet, config) +// Or a static key set (e.g.
for testing): // +// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}} +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier { return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL} } @@ -100,13 +106,35 @@ type Config struct { // Time function to check Token expiry. Defaults to time.Now Now func() time.Time + + // InsecureSkipSignatureCheck causes this package to skip JWT signature validation. + // It's intended for special cases where providers (such as Azure) use the "none" + // algorithm. + // + // This option can only be enabled safely when the ID Token is received directly + // from the provider after the token exchange. + // + // This option MUST NOT be used when receiving an ID Token from sources other + // than the token endpoint. + InsecureSkipSignatureCheck bool +} + +// VerifierContext returns an IDTokenVerifier that uses the provider's key set to +// verify JWTs. As opposed to Verifier, the context is used for all requests to +// the upstream JWKs endpoint. +func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier { + return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config) } // Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs. // -// The returned IDTokenVerifier is tied to the Provider's context and its behavior is -// undefined once the Provider's context is canceled. +// The returned verifier uses a background context for all requests to the upstream +// JWKs endpoint. To control that context, use VerifierContext instead. func (p *Provider) Verifier(config *Config) *IDTokenVerifier { + return p.newVerifier(p.remoteKeySet(), config) +} + +func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier { if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 { // Make a copy so we don't modify the config values. cp := &Config{} @@ -114,7 +142,7 @@ func (p *Provider) Verifier(config *Config) *IDTokenVerifier { cp.SupportedSigningAlgs = p.algorithms config = cp } - return NewVerifier(p.issuer, p.remoteKeySet, config) + return NewVerifier(p.issuer, keySet, config) } func parseJWT(p string) ([]byte, error) { @@ -178,25 +206,19 @@ func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src // // See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation // -// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) -// if err != nil { -// // handle error -// } +// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) +// if err != nil { +// // handle error +// } // -// // Extract the ID Token from oauth2 token. -// rawIDToken, ok := oauth2Token.Extra("id_token").(string) -// if !ok { -// // handle error -// } -// -// token, err := verifier.Verify(ctx, rawIDToken) +// // Extract the ID Token from oauth2 token. +// rawIDToken, ok := oauth2Token.Extra("id_token").(string) +// if !ok { +// // handle error +// } // +// token, err := verifier.Verify(ctx, rawIDToken) func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) { - jws, err := jose.ParseSigned(rawIDToken) - if err != nil { - return nil, fmt.Errorf("oidc: malformed jwt: %v", err) - } - // Throw out tokens with invalid claims before trying to verify the token. This lets // us do cheap checks before possibly re-syncing keys.
payload, err := parseJWT(rawIDToken) @@ -268,13 +290,15 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok nowTime := now() if t.Expiry.Before(nowTime) { - return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry) + return nil, &TokenExpiredError{Expiry: t.Expiry} } // If nbf claim is provided in token, ensure that it is indeed in the past. if token.NotBefore != nil { nbfTime := time.Time(*token.NotBefore) - leeway := 1 * time.Minute + // Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew. + // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153 + leeway := 5 * time.Minute if nowTime.Add(leeway).Before(nbfTime) { return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime) @@ -282,6 +306,15 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok } } + if v.config.InsecureSkipSignatureCheck { + return t, nil + } + + jws, err := jose.ParseSigned(rawIDToken) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + switch len(jws.Signatures) { case 0: return nil, fmt.Errorf("oidc: id token not signed") @@ -302,6 +335,7 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok t.sigAlgorithm = sig.Header.Algorithm + ctx = context.WithValue(ctx, parsedJWTKey, jws) gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken) if err != nil { return nil, fmt.Errorf("failed to verify signature: %v", err) diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 00000000..23a0ada2 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
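As context for the go-oidc changes above (the shared remote key set, VerifierContext, and TokenExpiredError), a minimal usage sketch follows. It is illustrative only and not part of the patch: the issuer URL, client ID, and raw ID token are hypothetical placeholders.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()

	// Discovery; the provider now records jwksURL and lazily builds one
	// shared remote key set (Provider.remoteKeySet above) on first use.
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // hypothetical issuer
	if err != nil {
		log.Fatal(err)
	}

	// VerifierContext (added by this patch) ties JWKS requests to ctx;
	// Verifier would use a background context instead.
	verifier := provider.VerifierContext(ctx, &oidc.Config{ClientID: "example-client"}) // hypothetical client ID

	rawIDToken := "eyJ..." // hypothetical raw ID token from a token exchange
	if _, err := verifier.Verify(ctx, rawIDToken); err != nil {
		// TokenExpiredError (added by this patch) lets callers distinguish
		// expiry from other validation failures.
		var expired *oidc.TokenExpiredError
		if errors.As(err, &expired) {
			fmt.Println("token expired at", expired.Expiry)
			return
		}
		log.Fatal(err)
	}
}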
diff --git a/vendor/github.com/coreos/go-semver/semver/BUILD b/vendor/github.com/coreos/go-semver/semver/BUILD new file mode 100644 index 00000000..d06fe853 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/BUILD @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "semver", + srcs = [ + "semver.go", + "sort.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/coreos/go-semver/semver", + importpath = "github.com/coreos/go-semver/semver", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 00000000..eb9fb7ff --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 00000000..e256b41a --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 00000000..23a0ada2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/BUILD b/vendor/github.com/coreos/go-systemd/v22/journal/BUILD new file mode 100644 index 00000000..ac7d462b --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "journal", + srcs = [ + "journal.go", + "journal_unix.go", + "journal_windows.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/coreos/go-systemd/v22/journal", + importpath = "github.com/coreos/go-systemd/v22/journal", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go new file mode 100644 index 00000000..ac24c776 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -0,0 +1,46 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API.
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "fmt" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go new file mode 100644 index 00000000..8d58ca0f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -0,0 +1,210 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + conn, err := net.Dial("unixgram", journalSocket) + if err != nil { + return false + } + defer conn.Close() + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. 
+func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go new file mode 100644 index 00000000..677aca68 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -0,0 +1,35 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "errors" +) + +func Enabled() bool { + return false +} + +func Send(message string, priority Priority, vars map[string]string) error { + return errors.New("could not initialize socket to journald") +} diff --git a/vendor/github.com/dexidp/dex/LICENSE b/vendor/github.com/dexidp/dex/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/dexidp/dex/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
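The files vendored next are dex's api/v2 package: a Bazel BUILD target wrapping the generated gRPC bindings (api.pb.go and api_grpc.pb.go). For orientation only — this sketch is not part of the vendored sources; the listen address and client values are illustrative assumptions, while NewDexClient and the message types come from the generated bindings below — a consumer would drive dex's management API roughly like this:

package main

import (
	"context"
	"log"

	api "github.com/dexidp/dex/api/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumes a dex instance serving its gRPC API on 127.0.0.1:5557.
	conn, err := grpc.Dial("127.0.0.1:5557",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial dex api: %v", err)
	}
	defer conn.Close()

	// NewDexClient is the client constructor generated into api_grpc.pb.go;
	// CreateClientReq/Client are messages defined in api.pb.go below.
	client := api.NewDexClient(conn)
	resp, err := client.CreateClient(context.Background(), &api.CreateClientReq{
		Client: &api.Client{
			Id:           "example-app",
			Secret:       "example-secret",
			RedirectUris: []string{"http://127.0.0.1:5555/callback"},
			Name:         "Example App",
		},
	})
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	log.Printf("created client (already existed: %v)", resp.AlreadyExists)
}

Note that CreateClientResp reports already_exists instead of returning an error, so callers can treat client registration as effectively idempotent.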
diff --git a/vendor/github.com/dexidp/dex/api/v2/BUILD b/vendor/github.com/dexidp/dex/api/v2/BUILD new file mode 100644 index 00000000..301a9fc8 --- /dev/null +++ b/vendor/github.com/dexidp/dex/api/v2/BUILD @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "api", + srcs = [ + "api.pb.go", + "api_grpc.pb.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/api/v2", + importpath = "github.com/dexidp/dex/api/v2", + visibility = ["//visibility:public"], + deps = [ + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + ], +) diff --git a/vendor/github.com/dexidp/dex/api/v2/LICENSE b/vendor/github.com/dexidp/dex/api/v2/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/dexidp/dex/api/v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/dexidp/dex/api/v2/api.pb.go b/vendor/github.com/dexidp/dex/api/v2/api.pb.go new file mode 100644 index 00000000..6440979f --- /dev/null +++ b/vendor/github.com/dexidp/dex/api/v2/api.pb.go @@ -0,0 +1,2100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: api/v2/api.proto + +package api + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Client represents an OAuth2 client. +type Client struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` + TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` + Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` +} + +func (x *Client) Reset() { + *x = Client{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Client) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Client) ProtoMessage() {} + +func (x *Client) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Client.ProtoReflect.Descriptor instead. 
+func (*Client) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{0} +} + +func (x *Client) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Client) GetSecret() string { + if x != nil { + return x.Secret + } + return "" +} + +func (x *Client) GetRedirectUris() []string { + if x != nil { + return x.RedirectUris + } + return nil +} + +func (x *Client) GetTrustedPeers() []string { + if x != nil { + return x.TrustedPeers + } + return nil +} + +func (x *Client) GetPublic() bool { + if x != nil { + return x.Public + } + return false +} + +func (x *Client) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Client) GetLogoUrl() string { + if x != nil { + return x.LogoUrl + } + return "" +} + +// GetClientReq is a request to retrieve client details. +type GetClientReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the client. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *GetClientReq) Reset() { + *x = GetClientReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientReq) ProtoMessage() {} + +func (x *GetClientReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientReq.ProtoReflect.Descriptor instead. +func (*GetClientReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{1} +} + +func (x *GetClientReq) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +// GetClientResp returns the client details. +type GetClientResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Client *Client `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` +} + +func (x *GetClientResp) Reset() { + *x = GetClientResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientResp) ProtoMessage() {} + +func (x *GetClientResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientResp.ProtoReflect.Descriptor instead. +func (*GetClientResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{2} +} + +func (x *GetClientResp) GetClient() *Client { + if x != nil { + return x.Client + } + return nil +} + +// CreateClientReq is a request to make a client. 
+type CreateClientReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Client *Client `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` +} + +func (x *CreateClientReq) Reset() { + *x = CreateClientReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateClientReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateClientReq) ProtoMessage() {} + +func (x *CreateClientReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateClientReq.ProtoReflect.Descriptor instead. +func (*CreateClientReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateClientReq) GetClient() *Client { + if x != nil { + return x.Client + } + return nil +} + +// CreateClientResp returns the response from creating a client. +type CreateClientResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AlreadyExists bool `protobuf:"varint,1,opt,name=already_exists,json=alreadyExists,proto3" json:"already_exists,omitempty"` + Client *Client `protobuf:"bytes,2,opt,name=client,proto3" json:"client,omitempty"` +} + +func (x *CreateClientResp) Reset() { + *x = CreateClientResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateClientResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateClientResp) ProtoMessage() {} + +func (x *CreateClientResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateClientResp.ProtoReflect.Descriptor instead. +func (*CreateClientResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateClientResp) GetAlreadyExists() bool { + if x != nil { + return x.AlreadyExists + } + return false +} + +func (x *CreateClientResp) GetClient() *Client { + if x != nil { + return x.Client + } + return nil +} + +// DeleteClientReq is a request to delete a client. +type DeleteClientReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the client. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *DeleteClientReq) Reset() { + *x = DeleteClientReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteClientReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteClientReq) ProtoMessage() {} + +func (x *DeleteClientReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteClientReq.ProtoReflect.Descriptor instead. +func (*DeleteClientReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteClientReq) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +// DeleteClientResp determines if the client is deleted successfully. +type DeleteClientResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` +} + +func (x *DeleteClientResp) Reset() { + *x = DeleteClientResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteClientResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteClientResp) ProtoMessage() {} + +func (x *DeleteClientResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteClientResp.ProtoReflect.Descriptor instead. +func (*DeleteClientResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{6} +} + +func (x *DeleteClientResp) GetNotFound() bool { + if x != nil { + return x.NotFound + } + return false +} + +// UpdateClientReq is a request to update an existing client. 
+type UpdateClientReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Id           string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"`
+	TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"`
+	Name         string   `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	LogoUrl      string   `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"`
+}
+
+func (x *UpdateClientReq) Reset() {
+	*x = UpdateClientReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UpdateClientReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateClientReq) ProtoMessage() {}
+
+func (x *UpdateClientReq) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateClientReq.ProtoReflect.Descriptor instead.
+func (*UpdateClientReq) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *UpdateClientReq) GetId() string {
+	if x != nil {
+		return x.Id
+	}
+	return ""
+}
+
+func (x *UpdateClientReq) GetRedirectUris() []string {
+	if x != nil {
+		return x.RedirectUris
+	}
+	return nil
+}
+
+func (x *UpdateClientReq) GetTrustedPeers() []string {
+	if x != nil {
+		return x.TrustedPeers
+	}
+	return nil
+}
+
+func (x *UpdateClientReq) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *UpdateClientReq) GetLogoUrl() string {
+	if x != nil {
+		return x.LogoUrl
+	}
+	return ""
+}
+
+// UpdateClientResp returns the response from updating a client.
+type UpdateClientResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"`
+}
+
+func (x *UpdateClientResp) Reset() {
+	*x = UpdateClientResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UpdateClientResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateClientResp) ProtoMessage() {}
+
+func (x *UpdateClientResp) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateClientResp.ProtoReflect.Descriptor instead.
+func (*UpdateClientResp) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *UpdateClientResp) GetNotFound() bool {
+	if x != nil {
+		return x.NotFound
+	}
+	return false
+}
+
+// Password is an email-to-password mapping managed by the storage.
+type Password struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + // Currently we do not accept plain text passwords. Could be an option in the future. + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"` + UserId string `protobuf:"bytes,4,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` +} + +func (x *Password) Reset() { + *x = Password{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Password) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Password) ProtoMessage() {} + +func (x *Password) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Password.ProtoReflect.Descriptor instead. +func (*Password) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{9} +} + +func (x *Password) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Password) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *Password) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *Password) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +// CreatePasswordReq is a request to make a password. +type CreatePasswordReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password *Password `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *CreatePasswordReq) Reset() { + *x = CreatePasswordReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreatePasswordReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreatePasswordReq) ProtoMessage() {} + +func (x *CreatePasswordReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreatePasswordReq.ProtoReflect.Descriptor instead. +func (*CreatePasswordReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{10} +} + +func (x *CreatePasswordReq) GetPassword() *Password { + if x != nil { + return x.Password + } + return nil +} + +// CreatePasswordResp returns the response from creating a password. 
+type CreatePasswordResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AlreadyExists bool `protobuf:"varint,1,opt,name=already_exists,json=alreadyExists,proto3" json:"already_exists,omitempty"`
+}
+
+func (x *CreatePasswordResp) Reset() {
+	*x = CreatePasswordResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CreatePasswordResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreatePasswordResp) ProtoMessage() {}
+
+func (x *CreatePasswordResp) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreatePasswordResp.ProtoReflect.Descriptor instead.
+func (*CreatePasswordResp) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *CreatePasswordResp) GetAlreadyExists() bool {
+	if x != nil {
+		return x.AlreadyExists
+	}
+	return false
+}
+
+// UpdatePasswordReq is a request to modify an existing password.
+type UpdatePasswordReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The email used to look up the password. This field cannot be modified.
+	Email       string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"`
+	NewHash     []byte `protobuf:"bytes,2,opt,name=new_hash,json=newHash,proto3" json:"new_hash,omitempty"`
+	NewUsername string `protobuf:"bytes,3,opt,name=new_username,json=newUsername,proto3" json:"new_username,omitempty"`
+}
+
+func (x *UpdatePasswordReq) Reset() {
+	*x = UpdatePasswordReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UpdatePasswordReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdatePasswordReq) ProtoMessage() {}
+
+func (x *UpdatePasswordReq) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdatePasswordReq.ProtoReflect.Descriptor instead.
+func (*UpdatePasswordReq) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *UpdatePasswordReq) GetEmail() string {
+	if x != nil {
+		return x.Email
+	}
+	return ""
+}
+
+func (x *UpdatePasswordReq) GetNewHash() []byte {
+	if x != nil {
+		return x.NewHash
+	}
+	return nil
+}
+
+func (x *UpdatePasswordReq) GetNewUsername() string {
+	if x != nil {
+		return x.NewUsername
+	}
+	return ""
+}
+
+// UpdatePasswordResp returns the response from modifying an existing password.
+type UpdatePasswordResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` +} + +func (x *UpdatePasswordResp) Reset() { + *x = UpdatePasswordResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdatePasswordResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdatePasswordResp) ProtoMessage() {} + +func (x *UpdatePasswordResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdatePasswordResp.ProtoReflect.Descriptor instead. +func (*UpdatePasswordResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{13} +} + +func (x *UpdatePasswordResp) GetNotFound() bool { + if x != nil { + return x.NotFound + } + return false +} + +// DeletePasswordReq is a request to delete a password. +type DeletePasswordReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` +} + +func (x *DeletePasswordReq) Reset() { + *x = DeletePasswordReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeletePasswordReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeletePasswordReq) ProtoMessage() {} + +func (x *DeletePasswordReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeletePasswordReq.ProtoReflect.Descriptor instead. +func (*DeletePasswordReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{14} +} + +func (x *DeletePasswordReq) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +// DeletePasswordResp returns the response from deleting a password. 
+type DeletePasswordResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` +} + +func (x *DeletePasswordResp) Reset() { + *x = DeletePasswordResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeletePasswordResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeletePasswordResp) ProtoMessage() {} + +func (x *DeletePasswordResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeletePasswordResp.ProtoReflect.Descriptor instead. +func (*DeletePasswordResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{15} +} + +func (x *DeletePasswordResp) GetNotFound() bool { + if x != nil { + return x.NotFound + } + return false +} + +// ListPasswordReq is a request to enumerate passwords. +type ListPasswordReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListPasswordReq) Reset() { + *x = ListPasswordReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPasswordReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPasswordReq) ProtoMessage() {} + +func (x *ListPasswordReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPasswordReq.ProtoReflect.Descriptor instead. +func (*ListPasswordReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{16} +} + +// ListPasswordResp returns a list of passwords. +type ListPasswordResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Passwords []*Password `protobuf:"bytes,1,rep,name=passwords,proto3" json:"passwords,omitempty"` +} + +func (x *ListPasswordResp) Reset() { + *x = ListPasswordResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPasswordResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPasswordResp) ProtoMessage() {} + +func (x *ListPasswordResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPasswordResp.ProtoReflect.Descriptor instead. 
+func (*ListPasswordResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{17} +} + +func (x *ListPasswordResp) GetPasswords() []*Password { + if x != nil { + return x.Passwords + } + return nil +} + +// VersionReq is a request to fetch version info. +type VersionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VersionReq) Reset() { + *x = VersionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionReq) ProtoMessage() {} + +func (x *VersionReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionReq.ProtoReflect.Descriptor instead. +func (*VersionReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{18} +} + +// VersionResp holds the version info of components. +type VersionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Semantic version of the server. + Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` + // Numeric version of the API. It increases every time a new call is added to the API. + // Clients should use this info to determine if the server supports specific features. + Api int32 `protobuf:"varint,2,opt,name=api,proto3" json:"api,omitempty"` +} + +func (x *VersionResp) Reset() { + *x = VersionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionResp) ProtoMessage() {} + +func (x *VersionResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionResp.ProtoReflect.Descriptor instead. +func (*VersionResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{19} +} + +func (x *VersionResp) GetServer() string { + if x != nil { + return x.Server + } + return "" +} + +func (x *VersionResp) GetApi() int32 { + if x != nil { + return x.Api + } + return 0 +} + +// RefreshTokenRef contains the metadata for a refresh token that is managed by the storage. +type RefreshTokenRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID of the refresh token. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + CreatedAt int64 `protobuf:"varint,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + LastUsed int64 `protobuf:"varint,6,opt,name=last_used,json=lastUsed,proto3" json:"last_used,omitempty"` +} + +func (x *RefreshTokenRef) Reset() { + *x = RefreshTokenRef{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RefreshTokenRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RefreshTokenRef) ProtoMessage() {} + +func (x *RefreshTokenRef) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RefreshTokenRef.ProtoReflect.Descriptor instead. +func (*RefreshTokenRef) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{20} +} + +func (x *RefreshTokenRef) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *RefreshTokenRef) GetClientId() string { + if x != nil { + return x.ClientId + } + return "" +} + +func (x *RefreshTokenRef) GetCreatedAt() int64 { + if x != nil { + return x.CreatedAt + } + return 0 +} + +func (x *RefreshTokenRef) GetLastUsed() int64 { + if x != nil { + return x.LastUsed + } + return 0 +} + +// ListRefreshReq is a request to enumerate the refresh tokens of a user. +type ListRefreshReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The "sub" claim returned in the ID Token. + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` +} + +func (x *ListRefreshReq) Reset() { + *x = ListRefreshReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRefreshReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRefreshReq) ProtoMessage() {} + +func (x *ListRefreshReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRefreshReq.ProtoReflect.Descriptor instead. +func (*ListRefreshReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{21} +} + +func (x *ListRefreshReq) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +// ListRefreshResp returns a list of refresh tokens for a user. 
+type ListRefreshResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	RefreshTokens []*RefreshTokenRef `protobuf:"bytes,1,rep,name=refresh_tokens,json=refreshTokens,proto3" json:"refresh_tokens,omitempty"`
+}
+
+func (x *ListRefreshResp) Reset() {
+	*x = ListRefreshResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListRefreshResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListRefreshResp) ProtoMessage() {}
+
+func (x *ListRefreshResp) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListRefreshResp.ProtoReflect.Descriptor instead.
+func (*ListRefreshResp) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *ListRefreshResp) GetRefreshTokens() []*RefreshTokenRef {
+	if x != nil {
+		return x.RefreshTokens
+	}
+	return nil
+}
+
+// RevokeRefreshReq is a request to revoke the refresh token of the user-client pair.
+type RevokeRefreshReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The "sub" claim returned in the ID Token.
+	UserId   string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+	ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+}
+
+func (x *RevokeRefreshReq) Reset() {
+	*x = RevokeRefreshReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_v2_api_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RevokeRefreshReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RevokeRefreshReq) ProtoMessage() {}
+
+func (x *RevokeRefreshReq) ProtoReflect() protoreflect.Message {
+	mi := &file_api_v2_api_proto_msgTypes[23]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RevokeRefreshReq.ProtoReflect.Descriptor instead.
+func (*RevokeRefreshReq) Descriptor() ([]byte, []int) {
+	return file_api_v2_api_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *RevokeRefreshReq) GetUserId() string {
+	if x != nil {
+		return x.UserId
+	}
+	return ""
+}
+
+func (x *RevokeRefreshReq) GetClientId() string {
+	if x != nil {
+		return x.ClientId
+	}
+	return ""
+}
+
+// RevokeRefreshResp determines if the refresh token is revoked successfully.
+type RevokeRefreshResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Set to true if the refresh token was not found and could not be revoked.
+ NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` +} + +func (x *RevokeRefreshResp) Reset() { + *x = RevokeRefreshResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeRefreshResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeRefreshResp) ProtoMessage() {} + +func (x *RevokeRefreshResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeRefreshResp.ProtoReflect.Descriptor instead. +func (*RevokeRefreshResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{24} +} + +func (x *RevokeRefreshResp) GetNotFound() bool { + if x != nil { + return x.NotFound + } + return false +} + +type VerifyPasswordReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *VerifyPasswordReq) Reset() { + *x = VerifyPasswordReq{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyPasswordReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyPasswordReq) ProtoMessage() {} + +func (x *VerifyPasswordReq) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyPasswordReq.ProtoReflect.Descriptor instead. 
+func (*VerifyPasswordReq) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{25} +} + +func (x *VerifyPasswordReq) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *VerifyPasswordReq) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type VerifyPasswordResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Verified bool `protobuf:"varint,1,opt,name=verified,proto3" json:"verified,omitempty"` + NotFound bool `protobuf:"varint,2,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` +} + +func (x *VerifyPasswordResp) Reset() { + *x = VerifyPasswordResp{} + if protoimpl.UnsafeEnabled { + mi := &file_api_v2_api_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyPasswordResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyPasswordResp) ProtoMessage() {} + +func (x *VerifyPasswordResp) ProtoReflect() protoreflect.Message { + mi := &file_api_v2_api_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyPasswordResp.ProtoReflect.Descriptor instead. +func (*VerifyPasswordResp) Descriptor() ([]byte, []int) { + return file_api_v2_api_proto_rawDescGZIP(), []int{26} +} + +func (x *VerifyPasswordResp) GetVerified() bool { + if x != nil { + return x.Verified + } + return false +} + +func (x *VerifyPasswordResp) GetNotFound() bool { + if x != nil { + return x.NotFound + } + return false +} + +var File_api_v2_api_proto protoreflect.FileDescriptor + +var file_api_v2_api_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x03, 0x61, 0x70, 0x69, 0x22, 0xc1, 0x01, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x1e, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x23, 0x0a, 0x06, + 
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x22, 0x36, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x5e, 0x0a, 0x10, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, + 0x0e, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, + 0x69, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x10, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x9a, 0x01, + 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, + 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x6f, 0x55, 0x72, 0x6c, 0x22, 0x2f, 0x0a, 0x10, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x69, 0x0a, 0x08, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, + 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, + 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x75, 0x73, 0x65, 0x72, 
0x49, 0x64, 0x22, 0x3e, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x70, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3b, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x73, 0x22, 0x67, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, + 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x6e, 0x65, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, + 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x31, 0x0a, 0x12, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, + 0x29, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x31, 0x0a, 0x12, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x11, 0x0a, + 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x22, 0x3f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x12, 0x2b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x09, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x22, + 0x37, 0x0a, 0x0b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x61, 0x70, 0x69, 0x22, 0x7a, 0x0a, 0x0f, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x73, 0x65, 0x64, 0x22, 0x29, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x22, + 0x4e, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x66, + 0x52, 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x22, + 0x48, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x52, 0x65, 0x71, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x30, 0x0a, 0x11, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x45, 0x0a, 0x11, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x22, 0x4d, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x32, 0xfd, 0x05, 0x0a, 0x03, 0x44, 0x65, 0x78, 0x12, 0x34, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, + 0x3d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, + 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, + 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3d, 0x0a, + 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x0d, 0x4c, + 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x14, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x1a, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x3a, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x13, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, + 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x15, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, + 
0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x42, 0x36, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x6f, 0x73, 0x2e, + 0x64, 0x65, 0x78, 0x2e, 0x61, 0x70, 0x69, 0x5a, 0x20, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_api_v2_api_proto_rawDescOnce sync.Once + file_api_v2_api_proto_rawDescData = file_api_v2_api_proto_rawDesc +) + +func file_api_v2_api_proto_rawDescGZIP() []byte { + file_api_v2_api_proto_rawDescOnce.Do(func() { + file_api_v2_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_v2_api_proto_rawDescData) + }) + return file_api_v2_api_proto_rawDescData +} + +var file_api_v2_api_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_api_v2_api_proto_goTypes = []interface{}{ + (*Client)(nil), // 0: api.Client + (*GetClientReq)(nil), // 1: api.GetClientReq + (*GetClientResp)(nil), // 2: api.GetClientResp + (*CreateClientReq)(nil), // 3: api.CreateClientReq + (*CreateClientResp)(nil), // 4: api.CreateClientResp + (*DeleteClientReq)(nil), // 5: api.DeleteClientReq + (*DeleteClientResp)(nil), // 6: api.DeleteClientResp + (*UpdateClientReq)(nil), // 7: api.UpdateClientReq + (*UpdateClientResp)(nil), // 8: api.UpdateClientResp + (*Password)(nil), // 9: api.Password + (*CreatePasswordReq)(nil), // 10: api.CreatePasswordReq + (*CreatePasswordResp)(nil), // 11: api.CreatePasswordResp + (*UpdatePasswordReq)(nil), // 12: api.UpdatePasswordReq + (*UpdatePasswordResp)(nil), // 13: api.UpdatePasswordResp + (*DeletePasswordReq)(nil), // 14: api.DeletePasswordReq + (*DeletePasswordResp)(nil), // 15: api.DeletePasswordResp + (*ListPasswordReq)(nil), // 16: api.ListPasswordReq + (*ListPasswordResp)(nil), // 17: api.ListPasswordResp + (*VersionReq)(nil), // 18: api.VersionReq + (*VersionResp)(nil), // 19: api.VersionResp + (*RefreshTokenRef)(nil), // 20: api.RefreshTokenRef + (*ListRefreshReq)(nil), // 21: api.ListRefreshReq + (*ListRefreshResp)(nil), // 22: api.ListRefreshResp + (*RevokeRefreshReq)(nil), // 23: api.RevokeRefreshReq + (*RevokeRefreshResp)(nil), // 24: api.RevokeRefreshResp + (*VerifyPasswordReq)(nil), // 25: api.VerifyPasswordReq + (*VerifyPasswordResp)(nil), // 26: api.VerifyPasswordResp +} +var file_api_v2_api_proto_depIdxs = []int32{ + 0, // 0: api.GetClientResp.client:type_name -> api.Client + 0, // 1: api.CreateClientReq.client:type_name -> api.Client + 0, // 2: api.CreateClientResp.client:type_name -> api.Client + 9, // 3: api.CreatePasswordReq.password:type_name -> api.Password + 9, // 4: api.ListPasswordResp.passwords:type_name -> api.Password + 20, // 5: api.ListRefreshResp.refresh_tokens:type_name -> api.RefreshTokenRef + 1, // 6: api.Dex.GetClient:input_type -> api.GetClientReq + 3, // 7: api.Dex.CreateClient:input_type -> api.CreateClientReq + 7, // 8: api.Dex.UpdateClient:input_type -> api.UpdateClientReq + 5, // 9: api.Dex.DeleteClient:input_type -> api.DeleteClientReq + 10, // 10: 
api.Dex.CreatePassword:input_type -> api.CreatePasswordReq + 12, // 11: api.Dex.UpdatePassword:input_type -> api.UpdatePasswordReq + 14, // 12: api.Dex.DeletePassword:input_type -> api.DeletePasswordReq + 16, // 13: api.Dex.ListPasswords:input_type -> api.ListPasswordReq + 18, // 14: api.Dex.GetVersion:input_type -> api.VersionReq + 21, // 15: api.Dex.ListRefresh:input_type -> api.ListRefreshReq + 23, // 16: api.Dex.RevokeRefresh:input_type -> api.RevokeRefreshReq + 25, // 17: api.Dex.VerifyPassword:input_type -> api.VerifyPasswordReq + 2, // 18: api.Dex.GetClient:output_type -> api.GetClientResp + 4, // 19: api.Dex.CreateClient:output_type -> api.CreateClientResp + 8, // 20: api.Dex.UpdateClient:output_type -> api.UpdateClientResp + 6, // 21: api.Dex.DeleteClient:output_type -> api.DeleteClientResp + 11, // 22: api.Dex.CreatePassword:output_type -> api.CreatePasswordResp + 13, // 23: api.Dex.UpdatePassword:output_type -> api.UpdatePasswordResp + 15, // 24: api.Dex.DeletePassword:output_type -> api.DeletePasswordResp + 17, // 25: api.Dex.ListPasswords:output_type -> api.ListPasswordResp + 19, // 26: api.Dex.GetVersion:output_type -> api.VersionResp + 22, // 27: api.Dex.ListRefresh:output_type -> api.ListRefreshResp + 24, // 28: api.Dex.RevokeRefresh:output_type -> api.RevokeRefreshResp + 26, // 29: api.Dex.VerifyPassword:output_type -> api.VerifyPasswordResp + 18, // [18:30] is the sub-list for method output_type + 6, // [6:18] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_api_v2_api_proto_init() } +func file_api_v2_api_proto_init() { + if File_api_v2_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_api_v2_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Client); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateClientReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateClientResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteClientReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteClientResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateClientReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateClientResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Password); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreatePasswordReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreatePasswordResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdatePasswordReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdatePasswordResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeletePasswordReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeletePasswordResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPasswordReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPasswordResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshTokenRef); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRefreshReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRefreshResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeRefreshReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeRefreshResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyPasswordReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_v2_api_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyPasswordResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_api_v2_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 27, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_v2_api_proto_goTypes, + DependencyIndexes: file_api_v2_api_proto_depIdxs, + MessageInfos: file_api_v2_api_proto_msgTypes, + }.Build() + File_api_v2_api_proto = out.File + file_api_v2_api_proto_rawDesc = nil + file_api_v2_api_proto_goTypes = nil + file_api_v2_api_proto_depIdxs = nil +} diff --git a/vendor/github.com/dexidp/dex/api/v2/api.proto b/vendor/github.com/dexidp/dex/api/v2/api.proto new file mode 100644 index 00000000..c429fd80 --- /dev/null +++ b/vendor/github.com/dexidp/dex/api/v2/api.proto @@ -0,0 +1,202 @@ +syntax = "proto3"; + +package api; + +option java_package = "com.coreos.dex.api"; +option go_package = "github.com/dexidp/dex/api/v2;api"; + +// Client represents an OAuth2 client. +message Client { + string id = 1; + string secret = 2; + repeated string redirect_uris = 3; + repeated string trusted_peers = 4; + bool public = 5; + string name = 6; + string logo_url = 7; +} + +// GetClientReq is a request to retrieve client details. +message GetClientReq { + // The ID of the client. + string id = 1; +} + +// GetClientResp returns the client details. +message GetClientResp { + Client client = 1; +} + +// CreateClientReq is a request to make a client. +message CreateClientReq { + Client client = 1; +} + +// CreateClientResp returns the response from creating a client. +message CreateClientResp { + bool already_exists = 1; + Client client = 2; +} + +// DeleteClientReq is a request to delete a client. +message DeleteClientReq { + // The ID of the client. + string id = 1; +} + +// DeleteClientResp determines if the client is deleted successfully. 
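+// A true not_found means no client with the given ID existed, so nothing was
+// deleted.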
+message DeleteClientResp {
+  bool not_found = 1;
+}
+
+// UpdateClientReq is a request to update an existing client.
+message UpdateClientReq {
+  string id = 1;
+  repeated string redirect_uris = 2;
+  repeated string trusted_peers = 3;
+  string name = 4;
+  string logo_url = 5;
+}
+
+// UpdateClientResp returns the response from updating a client.
+message UpdateClientResp {
+  bool not_found = 1;
+}
+
+// TODO(ericchiang): expand this.
+
+// Password is an email to password mapping managed by the storage.
+message Password {
+  string email = 1;
+
+  // Currently we do not accept plain text passwords. Could be an option in the future.
+  bytes hash = 2;
+  string username = 3;
+  string user_id = 4;
+}
+
+// CreatePasswordReq is a request to make a password.
+message CreatePasswordReq {
+  Password password = 1;
+}
+
+// CreatePasswordResp returns the response from creating a password.
+message CreatePasswordResp {
+  bool already_exists = 1;
+}
+
+// UpdatePasswordReq is a request to modify an existing password.
+message UpdatePasswordReq {
+  // The email used to look up the password. This field cannot be modified.
+  string email = 1;
+  bytes new_hash = 2;
+  string new_username = 3;
+}
+
+// UpdatePasswordResp returns the response from modifying an existing password.
+message UpdatePasswordResp {
+  bool not_found = 1;
+}
+
+// DeletePasswordReq is a request to delete a password.
+message DeletePasswordReq {
+  string email = 1;
+}
+
+// DeletePasswordResp returns the response from deleting a password.
+message DeletePasswordResp {
+  bool not_found = 1;
+}
+
+// ListPasswordReq is a request to enumerate passwords.
+message ListPasswordReq {}
+
+// ListPasswordResp returns a list of passwords.
+message ListPasswordResp {
+  repeated Password passwords = 1;
+}
+
+// VersionReq is a request to fetch version info.
+message VersionReq {}
+
+// VersionResp holds the version info of components.
+message VersionResp {
+  // Semantic version of the server.
+  string server = 1;
+  // Numeric version of the API. It increases every time a new call is added to the API.
+  // Clients should use this info to determine if the server supports specific features.
+  int32 api = 2;
+}
+
+// RefreshTokenRef contains the metadata for a refresh token that is managed by the storage.
+message RefreshTokenRef {
+  // ID of the refresh token.
+  string id = 1;
+  string client_id = 2;
+  int64 created_at = 5;
+  int64 last_used = 6;
+}
+
+// ListRefreshReq is a request to enumerate the refresh tokens of a user.
+message ListRefreshReq {
+  // The "sub" claim returned in the ID Token.
+  string user_id = 1;
+}
+
+// ListRefreshResp returns a list of refresh tokens for a user.
+message ListRefreshResp {
+  repeated RefreshTokenRef refresh_tokens = 1;
+}
+
+// RevokeRefreshReq is a request to revoke the refresh token of the user-client pair.
+message RevokeRefreshReq {
+  // The "sub" claim returned in the ID Token.
+  string user_id = 1;
+  string client_id = 2;
+}
+
+// RevokeRefreshResp determines if the refresh token is revoked successfully.
+message RevokeRefreshResp {
+  // Set to true if the refresh token was not found and could not be revoked.
+  bool not_found = 1;
+}
+
+message VerifyPasswordReq {
+  string email = 1;
+  string password = 2;
+}
+
+message VerifyPasswordResp {
+  bool verified = 1;
+  bool not_found = 2;
+}
+
+// Dex represents the dex gRPC service.
+service Dex {
+  // GetClient gets a client.
+  rpc GetClient(GetClientReq) returns (GetClientResp) {};
+  // CreateClient creates a client.
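+  // If a client with the same ID already exists, the response reports it via
+  // CreateClientResp.already_exists.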
+ rpc CreateClient(CreateClientReq) returns (CreateClientResp) {}; + // UpdateClient updates an existing client + rpc UpdateClient(UpdateClientReq) returns (UpdateClientResp) {}; + // DeleteClient deletes the provided client. + rpc DeleteClient(DeleteClientReq) returns (DeleteClientResp) {}; + // CreatePassword creates a password. + rpc CreatePassword(CreatePasswordReq) returns (CreatePasswordResp) {}; + // UpdatePassword modifies existing password. + rpc UpdatePassword(UpdatePasswordReq) returns (UpdatePasswordResp) {}; + // DeletePassword deletes the password. + rpc DeletePassword(DeletePasswordReq) returns (DeletePasswordResp) {}; + // ListPassword lists all password entries. + rpc ListPasswords(ListPasswordReq) returns (ListPasswordResp) {}; + // GetVersion returns version information of the server. + rpc GetVersion(VersionReq) returns (VersionResp) {}; + // ListRefresh lists all the refresh token entries for a particular user. + rpc ListRefresh(ListRefreshReq) returns (ListRefreshResp) {}; + // RevokeRefresh revokes the refresh token for the provided user-client pair. + // + // Note that each user-client pair can have only one refresh token at a time. + rpc RevokeRefresh(RevokeRefreshReq) returns (RevokeRefreshResp) {}; + // VerifyPassword returns whether a password matches a hash for a specific email or not. + rpc VerifyPassword(VerifyPasswordReq) returns (VerifyPasswordResp) {}; +} diff --git a/vendor/github.com/dexidp/dex/api/v2/api_grpc.pb.go b/vendor/github.com/dexidp/dex/api/v2/api_grpc.pb.go new file mode 100644 index 00000000..b2a1900b --- /dev/null +++ b/vendor/github.com/dexidp/dex/api/v2/api_grpc.pb.go @@ -0,0 +1,544 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: api/v2/api.proto + +package api + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Dex_GetClient_FullMethodName = "/api.Dex/GetClient" + Dex_CreateClient_FullMethodName = "/api.Dex/CreateClient" + Dex_UpdateClient_FullMethodName = "/api.Dex/UpdateClient" + Dex_DeleteClient_FullMethodName = "/api.Dex/DeleteClient" + Dex_CreatePassword_FullMethodName = "/api.Dex/CreatePassword" + Dex_UpdatePassword_FullMethodName = "/api.Dex/UpdatePassword" + Dex_DeletePassword_FullMethodName = "/api.Dex/DeletePassword" + Dex_ListPasswords_FullMethodName = "/api.Dex/ListPasswords" + Dex_GetVersion_FullMethodName = "/api.Dex/GetVersion" + Dex_ListRefresh_FullMethodName = "/api.Dex/ListRefresh" + Dex_RevokeRefresh_FullMethodName = "/api.Dex/RevokeRefresh" + Dex_VerifyPassword_FullMethodName = "/api.Dex/VerifyPassword" +) + +// DexClient is the client API for Dex service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DexClient interface { + // GetClient gets a client. + GetClient(ctx context.Context, in *GetClientReq, opts ...grpc.CallOption) (*GetClientResp, error) + // CreateClient creates a client. 
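+	// A minimal usage sketch; the address, credentials setup, and client ID here
+	// are hypothetical, and error handling is elided:
+	//
+	//	ctx := context.Background()
+	//	conn, _ := grpc.Dial("127.0.0.1:5557", grpc.WithTransportCredentials(insecure.NewCredentials()))
+	//	defer conn.Close()
+	//	dex := NewDexClient(conn)
+	//	resp, _ := dex.CreateClient(ctx, &CreateClientReq{Client: &Client{Id: "example-app"}})
+	//	_ = resp.AlreadyExists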
+ CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error) + // UpdateClient updates an existing client + UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error) + // DeleteClient deletes the provided client. + DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error) + // CreatePassword creates a password. + CreatePassword(ctx context.Context, in *CreatePasswordReq, opts ...grpc.CallOption) (*CreatePasswordResp, error) + // UpdatePassword modifies existing password. + UpdatePassword(ctx context.Context, in *UpdatePasswordReq, opts ...grpc.CallOption) (*UpdatePasswordResp, error) + // DeletePassword deletes the password. + DeletePassword(ctx context.Context, in *DeletePasswordReq, opts ...grpc.CallOption) (*DeletePasswordResp, error) + // ListPassword lists all password entries. + ListPasswords(ctx context.Context, in *ListPasswordReq, opts ...grpc.CallOption) (*ListPasswordResp, error) + // GetVersion returns version information of the server. + GetVersion(ctx context.Context, in *VersionReq, opts ...grpc.CallOption) (*VersionResp, error) + // ListRefresh lists all the refresh token entries for a particular user. + ListRefresh(ctx context.Context, in *ListRefreshReq, opts ...grpc.CallOption) (*ListRefreshResp, error) + // RevokeRefresh revokes the refresh token for the provided user-client pair. + // + // Note that each user-client pair can have only one refresh token at a time. + RevokeRefresh(ctx context.Context, in *RevokeRefreshReq, opts ...grpc.CallOption) (*RevokeRefreshResp, error) + // VerifyPassword returns whether a password matches a hash for a specific email or not. + VerifyPassword(ctx context.Context, in *VerifyPasswordReq, opts ...grpc.CallOption) (*VerifyPasswordResp, error) +} + +type dexClient struct { + cc grpc.ClientConnInterface +} + +func NewDexClient(cc grpc.ClientConnInterface) DexClient { + return &dexClient{cc} +} + +func (c *dexClient) GetClient(ctx context.Context, in *GetClientReq, opts ...grpc.CallOption) (*GetClientResp, error) { + out := new(GetClientResp) + err := c.cc.Invoke(ctx, Dex_GetClient_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error) { + out := new(CreateClientResp) + err := c.cc.Invoke(ctx, Dex_CreateClient_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error) { + out := new(UpdateClientResp) + err := c.cc.Invoke(ctx, Dex_UpdateClient_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error) { + out := new(DeleteClientResp) + err := c.cc.Invoke(ctx, Dex_DeleteClient_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) CreatePassword(ctx context.Context, in *CreatePasswordReq, opts ...grpc.CallOption) (*CreatePasswordResp, error) { + out := new(CreatePasswordResp) + err := c.cc.Invoke(ctx, Dex_CreatePassword_FullMethodName, in, out, opts...) 
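+	// Propagate transport and server errors to the caller unchanged; on success
+	// the reply has already been unmarshaled into out.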
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) UpdatePassword(ctx context.Context, in *UpdatePasswordReq, opts ...grpc.CallOption) (*UpdatePasswordResp, error) { + out := new(UpdatePasswordResp) + err := c.cc.Invoke(ctx, Dex_UpdatePassword_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) DeletePassword(ctx context.Context, in *DeletePasswordReq, opts ...grpc.CallOption) (*DeletePasswordResp, error) { + out := new(DeletePasswordResp) + err := c.cc.Invoke(ctx, Dex_DeletePassword_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) ListPasswords(ctx context.Context, in *ListPasswordReq, opts ...grpc.CallOption) (*ListPasswordResp, error) { + out := new(ListPasswordResp) + err := c.cc.Invoke(ctx, Dex_ListPasswords_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) GetVersion(ctx context.Context, in *VersionReq, opts ...grpc.CallOption) (*VersionResp, error) { + out := new(VersionResp) + err := c.cc.Invoke(ctx, Dex_GetVersion_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) ListRefresh(ctx context.Context, in *ListRefreshReq, opts ...grpc.CallOption) (*ListRefreshResp, error) { + out := new(ListRefreshResp) + err := c.cc.Invoke(ctx, Dex_ListRefresh_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) RevokeRefresh(ctx context.Context, in *RevokeRefreshReq, opts ...grpc.CallOption) (*RevokeRefreshResp, error) { + out := new(RevokeRefreshResp) + err := c.cc.Invoke(ctx, Dex_RevokeRefresh_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dexClient) VerifyPassword(ctx context.Context, in *VerifyPasswordReq, opts ...grpc.CallOption) (*VerifyPasswordResp, error) { + out := new(VerifyPasswordResp) + err := c.cc.Invoke(ctx, Dex_VerifyPassword_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DexServer is the server API for Dex service. +// All implementations must embed UnimplementedDexServer +// for forward compatibility +type DexServer interface { + // GetClient gets a client. + GetClient(context.Context, *GetClientReq) (*GetClientResp, error) + // CreateClient creates a client. + CreateClient(context.Context, *CreateClientReq) (*CreateClientResp, error) + // UpdateClient updates an existing client + UpdateClient(context.Context, *UpdateClientReq) (*UpdateClientResp, error) + // DeleteClient deletes the provided client. + DeleteClient(context.Context, *DeleteClientReq) (*DeleteClientResp, error) + // CreatePassword creates a password. + CreatePassword(context.Context, *CreatePasswordReq) (*CreatePasswordResp, error) + // UpdatePassword modifies existing password. + UpdatePassword(context.Context, *UpdatePasswordReq) (*UpdatePasswordResp, error) + // DeletePassword deletes the password. + DeletePassword(context.Context, *DeletePasswordReq) (*DeletePasswordResp, error) + // ListPassword lists all password entries. + ListPasswords(context.Context, *ListPasswordReq) (*ListPasswordResp, error) + // GetVersion returns version information of the server. + GetVersion(context.Context, *VersionReq) (*VersionResp, error) + // ListRefresh lists all the refresh token entries for a particular user. 
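+	// The user is identified by ListRefreshReq.user_id, which carries the "sub"
+	// claim from the ID Token.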
+ ListRefresh(context.Context, *ListRefreshReq) (*ListRefreshResp, error) + // RevokeRefresh revokes the refresh token for the provided user-client pair. + // + // Note that each user-client pair can have only one refresh token at a time. + RevokeRefresh(context.Context, *RevokeRefreshReq) (*RevokeRefreshResp, error) + // VerifyPassword returns whether a password matches a hash for a specific email or not. + VerifyPassword(context.Context, *VerifyPasswordReq) (*VerifyPasswordResp, error) + mustEmbedUnimplementedDexServer() +} + +// UnimplementedDexServer must be embedded to have forward compatible implementations. +type UnimplementedDexServer struct { +} + +func (UnimplementedDexServer) GetClient(context.Context, *GetClientReq) (*GetClientResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetClient not implemented") +} +func (UnimplementedDexServer) CreateClient(context.Context, *CreateClientReq) (*CreateClientResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateClient not implemented") +} +func (UnimplementedDexServer) UpdateClient(context.Context, *UpdateClientReq) (*UpdateClientResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateClient not implemented") +} +func (UnimplementedDexServer) DeleteClient(context.Context, *DeleteClientReq) (*DeleteClientResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteClient not implemented") +} +func (UnimplementedDexServer) CreatePassword(context.Context, *CreatePasswordReq) (*CreatePasswordResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreatePassword not implemented") +} +func (UnimplementedDexServer) UpdatePassword(context.Context, *UpdatePasswordReq) (*UpdatePasswordResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdatePassword not implemented") +} +func (UnimplementedDexServer) DeletePassword(context.Context, *DeletePasswordReq) (*DeletePasswordResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeletePassword not implemented") +} +func (UnimplementedDexServer) ListPasswords(context.Context, *ListPasswordReq) (*ListPasswordResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPasswords not implemented") +} +func (UnimplementedDexServer) GetVersion(context.Context, *VersionReq) (*VersionResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") +} +func (UnimplementedDexServer) ListRefresh(context.Context, *ListRefreshReq) (*ListRefreshResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListRefresh not implemented") +} +func (UnimplementedDexServer) RevokeRefresh(context.Context, *RevokeRefreshReq) (*RevokeRefreshResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevokeRefresh not implemented") +} +func (UnimplementedDexServer) VerifyPassword(context.Context, *VerifyPasswordReq) (*VerifyPasswordResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyPassword not implemented") +} +func (UnimplementedDexServer) mustEmbedUnimplementedDexServer() {} + +// UnsafeDexServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DexServer will +// result in compilation errors. 
+type UnsafeDexServer interface { + mustEmbedUnimplementedDexServer() +} + +func RegisterDexServer(s grpc.ServiceRegistrar, srv DexServer) { + s.RegisterService(&Dex_ServiceDesc, srv) +} + +func _Dex_GetClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClientReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).GetClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_GetClient_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).GetClient(ctx, req.(*GetClientReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClientReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).CreateClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_CreateClient_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).CreateClient(ctx, req.(*CreateClientReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClientReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).UpdateClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_UpdateClient_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).UpdateClient(ctx, req.(*UpdateClientReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_DeleteClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClientReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).DeleteClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_DeleteClient_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).DeleteClient(ctx, req.(*DeleteClientReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_CreatePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreatePasswordReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).CreatePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_CreatePassword_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).CreatePassword(ctx, req.(*CreatePasswordReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_UpdatePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdatePasswordReq) + if err := dec(in); err != nil { + return nil, err + } 
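+	// Without an interceptor, dispatch directly to the implementation; otherwise
+	// hand the interceptor a typed closure over the same method.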
+ if interceptor == nil { + return srv.(DexServer).UpdatePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_UpdatePassword_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).UpdatePassword(ctx, req.(*UpdatePasswordReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_DeletePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeletePasswordReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).DeletePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_DeletePassword_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).DeletePassword(ctx, req.(*DeletePasswordReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_ListPasswords_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPasswordReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).ListPasswords(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_ListPasswords_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).ListPasswords(ctx, req.(*ListPasswordReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_GetVersion_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).GetVersion(ctx, req.(*VersionReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_ListRefresh_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRefreshReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).ListRefresh(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_ListRefresh_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).ListRefresh(ctx, req.(*ListRefreshReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_RevokeRefresh_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeRefreshReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).RevokeRefresh(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_RevokeRefresh_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).RevokeRefresh(ctx, req.(*RevokeRefreshReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dex_VerifyPassword_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyPasswordReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DexServer).VerifyPassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Dex_VerifyPassword_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DexServer).VerifyPassword(ctx, req.(*VerifyPasswordReq)) + } + return interceptor(ctx, in, info, handler) +} + +// Dex_ServiceDesc is the grpc.ServiceDesc for Dex service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Dex_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "api.Dex", + HandlerType: (*DexServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetClient", + Handler: _Dex_GetClient_Handler, + }, + { + MethodName: "CreateClient", + Handler: _Dex_CreateClient_Handler, + }, + { + MethodName: "UpdateClient", + Handler: _Dex_UpdateClient_Handler, + }, + { + MethodName: "DeleteClient", + Handler: _Dex_DeleteClient_Handler, + }, + { + MethodName: "CreatePassword", + Handler: _Dex_CreatePassword_Handler, + }, + { + MethodName: "UpdatePassword", + Handler: _Dex_UpdatePassword_Handler, + }, + { + MethodName: "DeletePassword", + Handler: _Dex_DeletePassword_Handler, + }, + { + MethodName: "ListPasswords", + Handler: _Dex_ListPasswords_Handler, + }, + { + MethodName: "GetVersion", + Handler: _Dex_GetVersion_Handler, + }, + { + MethodName: "ListRefresh", + Handler: _Dex_ListRefresh_Handler, + }, + { + MethodName: "RevokeRefresh", + Handler: _Dex_RevokeRefresh_Handler, + }, + { + MethodName: "VerifyPassword", + Handler: _Dex_VerifyPassword_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api/v2/api.proto", +} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/BUILD b/vendor/github.com/dexidp/dex/cmd/dex/BUILD new file mode 100644 index 00000000..8ee4e528 --- /dev/null +++ b/vendor/github.com/dexidp/dex/cmd/dex/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "dex_lib", + srcs = [ + "config.go", + "main.go", + "serve.go", + "version.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/cmd/dex", + importpath = "github.com/dexidp/dex/cmd/dex", + visibility = ["//visibility:private"], + deps = [ + "//vendor/github.com/AppsFlyer/go-sundheit", + "//vendor/github.com/AppsFlyer/go-sundheit/checks", + "//vendor/github.com/AppsFlyer/go-sundheit/http", + "//vendor/github.com/dexidp/dex/api/v2:api", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/server", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/dexidp/dex/storage/ent", + "//vendor/github.com/dexidp/dex/storage/etcd", + "//vendor/github.com/dexidp/dex/storage/kubernetes", + "//vendor/github.com/dexidp/dex/storage/memory", + "//vendor/github.com/dexidp/dex/storage/sql", + "//vendor/github.com/fsnotify/fsnotify", + "//vendor/github.com/ghodss/yaml", + "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus", + "//vendor/github.com/oklog/run", + "//vendor/github.com/prometheus/client_golang/prometheus", + "//vendor/github.com/prometheus/client_golang/prometheus/collectors", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp", + "//vendor/github.com/sirupsen/logrus", + "//vendor/github.com/spf13/cobra", + 
"//vendor/golang.org/x/crypto/bcrypt", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//reflection", + ], +) + +go_binary( + name = "dex", + embed = [":dex_lib"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dexidp/dex/cmd/dex/config.go b/vendor/github.com/dexidp/dex/cmd/dex/config.go new file mode 100644 index 00000000..831156fd --- /dev/null +++ b/vendor/github.com/dexidp/dex/cmd/dex/config.go @@ -0,0 +1,356 @@ +package main + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/crypto/bcrypt" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/server" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent" + "github.com/dexidp/dex/storage/etcd" + "github.com/dexidp/dex/storage/kubernetes" + "github.com/dexidp/dex/storage/memory" + "github.com/dexidp/dex/storage/sql" +) + +// Config is the config format for the main application. +type Config struct { + Issuer string `json:"issuer"` + Storage Storage `json:"storage"` + Web Web `json:"web"` + Telemetry Telemetry `json:"telemetry"` + OAuth2 OAuth2 `json:"oauth2"` + GRPC GRPC `json:"grpc"` + Expiry Expiry `json:"expiry"` + Logger Logger `json:"logger"` + + Frontend server.WebConfig `json:"frontend"` + + // StaticConnectors are user defined connectors specified in the ConfigMap + // Write operations, like updating a connector, will fail. + StaticConnectors []Connector `json:"connectors"` + + // StaticClients cause the server to use this list of clients rather than + // querying the storage. Write operations, like creating a client, will fail. + StaticClients []storage.Client `json:"staticClients"` + + // If enabled, the server will maintain a list of passwords which can be used + // to identify a user. + EnablePasswordDB bool `json:"enablePasswordDB"` + + // StaticPasswords cause the server use this list of passwords rather than + // querying the storage. Cannot be specified without enabling a passwords + // database. + StaticPasswords []password `json:"staticPasswords"` +} + +// Validate the configuration +func (c Config) Validate() error { + // Fast checks. Perform these first for a more responsive CLI. 
+	checks := []struct {
+		bad    bool
+		errMsg string
+	}{
+		{c.Issuer == "", "no issuer specified in config file"},
+		{!c.EnablePasswordDB && len(c.StaticPasswords) != 0, "cannot specify static passwords without enabling password db"},
+		{c.Storage.Config == nil, "no storage supplied in config file"},
+		{c.Web.HTTP == "" && c.Web.HTTPS == "", "must supply an HTTP/HTTPS address to listen on"},
+		{c.Web.HTTPS != "" && c.Web.TLSCert == "", "no cert specified for HTTPS"},
+		{c.Web.HTTPS != "" && c.Web.TLSKey == "", "no private key specified for HTTPS"},
+		{c.GRPC.TLSCert != "" && c.GRPC.Addr == "", "no address specified for gRPC"},
+		{c.GRPC.TLSKey != "" && c.GRPC.Addr == "", "no address specified for gRPC"},
+		{(c.GRPC.TLSCert == "") != (c.GRPC.TLSKey == ""), "must specify both a gRPC TLS cert and key"},
+		{c.GRPC.TLSCert == "" && c.GRPC.TLSClientCA != "", "cannot specify gRPC TLS client CA without a gRPC TLS cert"},
+	}
+
+	var checkErrors []string
+
+	for _, check := range checks {
+		if check.bad {
+			checkErrors = append(checkErrors, check.errMsg)
+		}
+	}
+	if len(checkErrors) != 0 {
+		return fmt.Errorf("invalid Config:\n\t-\t%s", strings.Join(checkErrors, "\n\t-\t"))
+	}
+	return nil
+}
+
+type password storage.Password
+
+func (p *password) UnmarshalJSON(b []byte) error {
+	var data struct {
+		Email       string `json:"email"`
+		Username    string `json:"username"`
+		UserID      string `json:"userID"`
+		Hash        string `json:"hash"`
+		HashFromEnv string `json:"hashFromEnv"`
+	}
+	if err := json.Unmarshal(b, &data); err != nil {
+		return err
+	}
+	*p = password(storage.Password{
+		Email:    data.Email,
+		Username: data.Username,
+		UserID:   data.UserID,
+	})
+	if len(data.Hash) == 0 && len(data.HashFromEnv) > 0 {
+		data.Hash = os.Getenv(data.HashFromEnv)
+	}
+	if len(data.Hash) == 0 {
+		return fmt.Errorf("no password hash provided")
+	}
+
+	// If this value is a valid bcrypt, use it.
+	_, bcryptErr := bcrypt.Cost([]byte(data.Hash))
+	if bcryptErr == nil {
+		p.Hash = []byte(data.Hash)
+		return nil
+	}
+
+	// For backwards compatibility try to base64 decode this value.
+	hashBytes, err := base64.StdEncoding.DecodeString(data.Hash)
+	if err != nil {
+		return fmt.Errorf("malformed bcrypt hash: %v", bcryptErr)
+	}
+	if _, err := bcrypt.Cost(hashBytes); err != nil {
+		return fmt.Errorf("malformed bcrypt hash: %v", err)
+	}
+	p.Hash = hashBytes
+	return nil
+}
+
+// OAuth2 describes enabled OAuth2 extensions.
+type OAuth2 struct {
+	// List of allowed grant types; defaults to all supported types.
+	GrantTypes []string `json:"grantTypes"`
+
+	ResponseTypes []string `json:"responseTypes"`
+	// If specified, do not prompt the user to approve client authorization. The
+	// act of logging in implies authorization.
+	SkipApprovalScreen bool `json:"skipApprovalScreen"`
+	// If specified, show the connector selection screen even if there's only one.
+	AlwaysShowLoginScreen bool `json:"alwaysShowLoginScreen"`
+	// This is the connector that can be used for the password grant.
+	PasswordConnector string `json:"passwordConnector"`
+}
+
+// Web is the config format for the HTTP server.
+type Web struct {
+	HTTP           string   `json:"http"`
+	HTTPS          string   `json:"https"`
+	TLSCert        string   `json:"tlsCert"`
+	TLSKey         string   `json:"tlsKey"`
+	AllowedOrigins []string `json:"allowedOrigins"`
+}
+
+// Telemetry is the config format for telemetry including the HTTP server config.
+type Telemetry struct { + HTTP string `json:"http"` + // EnableProfiling makes profiling endpoints available via web interface host:port/debug/pprof/ + EnableProfiling bool `json:"enableProfiling"` +} + +// GRPC is the config for the gRPC API. +type GRPC struct { + // The port to listen on. + Addr string `json:"addr"` + TLSCert string `json:"tlsCert"` + TLSKey string `json:"tlsKey"` + TLSClientCA string `json:"tlsClientCA"` + Reflection bool `json:"reflection"` +} + +// Storage holds app's storage configuration. +type Storage struct { + Type string `json:"type"` + Config StorageConfig `json:"config"` +} + +// StorageConfig is a configuration that can create a storage. +type StorageConfig interface { + Open(logger log.Logger) (storage.Storage, error) +} + +var ( + _ StorageConfig = (*etcd.Etcd)(nil) + _ StorageConfig = (*kubernetes.Config)(nil) + _ StorageConfig = (*memory.Config)(nil) + _ StorageConfig = (*sql.SQLite3)(nil) + _ StorageConfig = (*sql.Postgres)(nil) + _ StorageConfig = (*sql.MySQL)(nil) + _ StorageConfig = (*ent.SQLite3)(nil) + _ StorageConfig = (*ent.Postgres)(nil) + _ StorageConfig = (*ent.MySQL)(nil) +) + +func getORMBasedSQLStorage(normal, entBased StorageConfig) func() StorageConfig { + return func() StorageConfig { + switch os.Getenv("DEX_ENT_ENABLED") { + case "true", "yes": + return entBased + default: + return normal + } + } +} + +var storages = map[string]func() StorageConfig{ + "etcd": func() StorageConfig { return new(etcd.Etcd) }, + "kubernetes": func() StorageConfig { return new(kubernetes.Config) }, + "memory": func() StorageConfig { return new(memory.Config) }, + "sqlite3": getORMBasedSQLStorage(&sql.SQLite3{}, &ent.SQLite3{}), + "postgres": getORMBasedSQLStorage(&sql.Postgres{}, &ent.Postgres{}), + "mysql": getORMBasedSQLStorage(&sql.MySQL{}, &ent.MySQL{}), +} + +// isExpandEnvEnabled returns if os.ExpandEnv should be used for each storage and connector config. +// Disabling this feature avoids surprises e.g. if the LDAP bind password contains a dollar character. +// Returns false if the env variable "DEX_EXPAND_ENV" is a falsy string, e.g. "false". +// Returns true if the env variable is unset or a truthy string, e.g. "true", or can't be parsed as bool. +func isExpandEnvEnabled() bool { + enabled, err := strconv.ParseBool(os.Getenv("DEX_EXPAND_ENV")) + if err != nil { + // Unset, empty string or can't be parsed as bool: Default = true. + return true + } + return enabled +} + +// UnmarshalJSON allows Storage to implement the unmarshaler interface to +// dynamically determine the type of the storage config. +func (s *Storage) UnmarshalJSON(b []byte) error { + var store struct { + Type string `json:"type"` + Config json.RawMessage `json:"config"` + } + if err := json.Unmarshal(b, &store); err != nil { + return fmt.Errorf("parse storage: %v", err) + } + f, ok := storages[store.Type] + if !ok { + return fmt.Errorf("unknown storage type %q", store.Type) + } + + storageConfig := f() + if len(store.Config) != 0 { + data := []byte(store.Config) + if isExpandEnvEnabled() { + // Caution, we're expanding in the raw JSON/YAML source. This may not be what the admin expects. + data = []byte(os.ExpandEnv(string(store.Config))) + } + if err := json.Unmarshal(data, storageConfig); err != nil { + return fmt.Errorf("parse storage config: %v", err) + } + } + *s = Storage{ + Type: store.Type, + Config: storageConfig, + } + return nil +} + +// Connector is a magical type that can unmarshal YAML dynamically. 
The +// Type field determines the connector type, which is then customized for Config. +type Connector struct { + Type string `json:"type"` + Name string `json:"name"` + ID string `json:"id"` + + Config server.ConnectorConfig `json:"config"` +} + +// UnmarshalJSON allows Connector to implement the unmarshaler interface to +// dynamically determine the type of the connector config. +func (c *Connector) UnmarshalJSON(b []byte) error { + var conn struct { + Type string `json:"type"` + Name string `json:"name"` + ID string `json:"id"` + + Config json.RawMessage `json:"config"` + } + if err := json.Unmarshal(b, &conn); err != nil { + return fmt.Errorf("parse connector: %v", err) + } + f, ok := server.ConnectorsConfig[conn.Type] + if !ok { + return fmt.Errorf("unknown connector type %q", conn.Type) + } + + connConfig := f() + if len(conn.Config) != 0 { + data := []byte(conn.Config) + if isExpandEnvEnabled() { + // Caution, we're expanding in the raw JSON/YAML source. This may not be what the admin expects. + data = []byte(os.ExpandEnv(string(conn.Config))) + } + if err := json.Unmarshal(data, connConfig); err != nil { + return fmt.Errorf("parse connector config: %v", err) + } + } + *c = Connector{ + Type: conn.Type, + Name: conn.Name, + ID: conn.ID, + Config: connConfig, + } + return nil +} + +// ToStorageConnector converts an object to storage connector type. +func ToStorageConnector(c Connector) (storage.Connector, error) { + data, err := json.Marshal(c.Config) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to marshal connector config: %v", err) + } + + return storage.Connector{ + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: data, + }, nil +} + +// Expiry holds configuration for the validity period of components. +type Expiry struct { + // SigningKeys defines the duration of time after which the SigningKeys will be rotated. + SigningKeys string `json:"signingKeys"` + + // IdTokens defines the duration of time for which the IdTokens will be valid. + IDTokens string `json:"idTokens"` + + // AuthRequests defines the duration of time for which the AuthRequests will be valid. + AuthRequests string `json:"authRequests"` + + // DeviceRequests defines the duration of time for which the DeviceRequests will be valid. + DeviceRequests string `json:"deviceRequests"` + + // RefreshTokens defines refresh tokens expiry policy + RefreshTokens RefreshToken `json:"refreshTokens"` +} + +// Logger holds configuration required to customize logging for dex. +type Logger struct { + // Level sets logging level severity. + Level string `json:"level"` + + // Format specifies the format to be used for logging. 
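+	// Valid values are "json" and "text"; an empty value defaults to "text".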
+	Format string `json:"format"`
+}
+
+type RefreshToken struct {
+	DisableRotation   bool   `json:"disableRotation"`
+	ReuseInterval     string `json:"reuseInterval"`
+	AbsoluteLifetime  string `json:"absoluteLifetime"`
+	ValidIfNotUsedFor string `json:"validIfNotUsedFor"`
+}
diff --git a/vendor/github.com/dexidp/dex/cmd/dex/main.go b/vendor/github.com/dexidp/dex/cmd/dex/main.go
new file mode 100644
index 00000000..be334d92
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/cmd/dex/main.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func commandRoot() *cobra.Command {
+	rootCmd := &cobra.Command{
+		Use: "dex",
+		Run: func(cmd *cobra.Command, args []string) {
+			cmd.Help()
+			os.Exit(2)
+		},
+	}
+	rootCmd.AddCommand(commandServe())
+	rootCmd.AddCommand(commandVersion())
+	return rootCmd
+}
+
+func main() {
+	if err := commandRoot().Execute(); err != nil {
+		fmt.Fprintln(os.Stderr, err.Error())
+		os.Exit(2)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/cmd/dex/serve.go b/vendor/github.com/dexidp/dex/cmd/dex/serve.go
new file mode 100644
index 00000000..47b090ae
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/cmd/dex/serve.go
@@ -0,0 +1,684 @@
+package main
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/pprof"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+	"github.com/AppsFlyer/go-sundheit/checks"
+	gosundheithttp "github.com/AppsFlyer/go-sundheit/http"
+	"github.com/fsnotify/fsnotify"
+	"github.com/ghodss/yaml"
+	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/oklog/run"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/reflection"
+
+	"github.com/dexidp/dex/api/v2"
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/server"
+	"github.com/dexidp/dex/storage"
+)
+
+type serveOptions struct {
+	// Config file path
+	config string
+
+	// Flags
+	webHTTPAddr   string
+	webHTTPSAddr  string
+	telemetryAddr string
+	grpcAddr      string
+}
+
+func commandServe() *cobra.Command {
+	options := serveOptions{}
+
+	cmd := &cobra.Command{
+		Use:     "serve [flags] [config file]",
+		Short:   "Launch Dex",
+		Example: "dex serve config.yaml",
+		Args:    cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cmd.SilenceUsage = true
+			cmd.SilenceErrors = true
+
+			options.config = args[0]
+
+			return runServe(options)
+		},
+	}
+
+	flags := cmd.Flags()
+
+	flags.StringVar(&options.webHTTPAddr, "web-http-addr", "", "Web HTTP address")
+	flags.StringVar(&options.webHTTPSAddr, "web-https-addr", "", "Web HTTPS address")
+	flags.StringVar(&options.telemetryAddr, "telemetry-addr", "", "Telemetry address")
+	flags.StringVar(&options.grpcAddr, "grpc-addr", "", "gRPC API address")
+
+	return cmd
+}
+
+func runServe(options serveOptions) error {
+	configFile := options.config
+	configData, err := os.ReadFile(configFile)
+	if err != nil {
+		return fmt.Errorf("failed to read config file %s: %v", configFile, err)
+	}
+
+	var c Config
+	if err := yaml.Unmarshal(configData, &c); err != nil {
+		return fmt.Errorf("error parsing config file %s: %v", configFile, err)
+	}
+
+	applyConfigOverrides(options, &c)
+
+	logger,
err := newLogger(c.Logger.Level, c.Logger.Format) + if err != nil { + return fmt.Errorf("invalid config: %v", err) + } + + logger.Infof( + "Dex Version: %s, Go Version: %s, Go OS/ARCH: %s %s", + version, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH, + ) + + if c.Logger.Level != "" { + logger.Infof("config using log level: %s", c.Logger.Level) + } + if err := c.Validate(); err != nil { + return err + } + + logger.Infof("config issuer: %s", c.Issuer) + + prometheusRegistry := prometheus.NewRegistry() + err = prometheusRegistry.Register(collectors.NewGoCollector()) + if err != nil { + return fmt.Errorf("failed to register Go runtime metrics: %v", err) + } + + err = prometheusRegistry.Register(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + if err != nil { + return fmt.Errorf("failed to register process metrics: %v", err) + } + + grpcMetrics := grpcprometheus.NewServerMetrics() + err = prometheusRegistry.Register(grpcMetrics) + if err != nil { + return fmt.Errorf("failed to register gRPC server metrics: %v", err) + } + + var grpcOptions []grpc.ServerOption + + allowedTLSCiphers := []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + } + + if c.GRPC.TLSCert != "" { + baseTLSConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + CipherSuites: allowedTLSCiphers, + PreferServerCipherSuites: true, + } + + tlsConfig, err := newTLSReloader(logger, c.GRPC.TLSCert, c.GRPC.TLSKey, c.GRPC.TLSClientCA, baseTLSConfig) + if err != nil { + return fmt.Errorf("invalid config: get gRPC TLS: %v", err) + } + + if c.GRPC.TLSClientCA != "" { + // Only add metrics if client auth is enabled + grpcOptions = append(grpcOptions, + grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()), + grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()), + ) + } + + grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig))) + } + + s, err := c.Storage.Config.Open(logger) + if err != nil { + return fmt.Errorf("failed to initialize storage: %v", err) + } + defer s.Close() + + logger.Infof("config storage: %s", c.Storage.Type) + + if len(c.StaticClients) > 0 { + for i, client := range c.StaticClients { + if client.Name == "" { + return fmt.Errorf("invalid config: Name field is required for a client") + } + if client.ID == "" && client.IDEnv == "" { + return fmt.Errorf("invalid config: ID or IDEnv field is required for a client") + } + if client.IDEnv != "" { + if client.ID != "" { + return fmt.Errorf("invalid config: ID and IDEnv fields are exclusive for client %q", client.ID) + } + c.StaticClients[i].ID = os.Getenv(client.IDEnv) + } + if client.Secret == "" && client.SecretEnv == "" && !client.Public { + return fmt.Errorf("invalid config: Secret or SecretEnv field is required for client %q", client.ID) + } + if client.SecretEnv != "" { + if client.Secret != "" { + return fmt.Errorf("invalid config: Secret and SecretEnv fields are exclusive for client %q", client.ID) + } + c.StaticClients[i].Secret = os.Getenv(client.SecretEnv) + } + logger.Infof("config static client: %s", client.Name) + } + s = storage.WithStaticClients(s, c.StaticClients) + } + if len(c.StaticPasswords) > 0 { + passwords := make([]storage.Password, len(c.StaticPasswords)) + for i, p := range 
c.StaticPasswords {
+			passwords[i] = storage.Password(p)
+		}
+		s = storage.WithStaticPasswords(s, passwords, logger)
+	}
+
+	storageConnectors := make([]storage.Connector, len(c.StaticConnectors))
+	for i, c := range c.StaticConnectors {
+		if c.ID == "" || c.Name == "" || c.Type == "" {
+			return fmt.Errorf("invalid config: ID, Type and Name fields are required for a connector")
+		}
+		if c.Config == nil {
+			return fmt.Errorf("invalid config: no config field for connector %q", c.ID)
+		}
+		logger.Infof("config connector: %s", c.ID)
+
+		// convert to a storage connector object
+		conn, err := ToStorageConnector(c)
+		if err != nil {
+			return fmt.Errorf("failed to initialize storage connectors: %v", err)
+		}
+		storageConnectors[i] = conn
+	}
+
+	if c.EnablePasswordDB {
+		storageConnectors = append(storageConnectors, storage.Connector{
+			ID:   server.LocalConnector,
+			Name: "Email",
+			Type: server.LocalConnector,
+		})
+		logger.Infof("config connector: local passwords enabled")
+	}
+
+	s = storage.WithStaticConnectors(s, storageConnectors)
+
+	if len(c.OAuth2.ResponseTypes) > 0 {
+		logger.Infof("config response types accepted: %s", c.OAuth2.ResponseTypes)
+	}
+	if c.OAuth2.SkipApprovalScreen {
+		logger.Infof("config skipping approval screen")
+	}
+	if c.OAuth2.PasswordConnector != "" {
+		logger.Infof("config using password grant connector: %s", c.OAuth2.PasswordConnector)
+	}
+	if len(c.Web.AllowedOrigins) > 0 {
+		logger.Infof("config allowed origins: %s", c.Web.AllowedOrigins)
+	}
+
+	// explicitly convert to UTC.
+	now := func() time.Time { return time.Now().UTC() }
+
+	healthChecker := gosundheit.New()
+
+	serverConfig := server.Config{
+		AllowedGrantTypes:      c.OAuth2.GrantTypes,
+		SupportedResponseTypes: c.OAuth2.ResponseTypes,
+		SkipApprovalScreen:     c.OAuth2.SkipApprovalScreen,
+		AlwaysShowLoginScreen:  c.OAuth2.AlwaysShowLoginScreen,
+		PasswordConnector:      c.OAuth2.PasswordConnector,
+		AllowedOrigins:         c.Web.AllowedOrigins,
+		Issuer:                 c.Issuer,
+		Storage:                s,
+		Web:                    c.Frontend,
+		Logger:                 logger,
+		Now:                    now,
+		PrometheusRegistry:     prometheusRegistry,
+		HealthChecker:          healthChecker,
+	}
+	if c.Expiry.SigningKeys != "" {
+		signingKeys, err := time.ParseDuration(c.Expiry.SigningKeys)
+		if err != nil {
+			return fmt.Errorf("invalid config value %q for signing keys expiry: %v", c.Expiry.SigningKeys, err)
+		}
+		logger.Infof("config signing keys expire after: %v", signingKeys)
+		serverConfig.RotateKeysAfter = signingKeys
+	}
+	if c.Expiry.IDTokens != "" {
+		idTokens, err := time.ParseDuration(c.Expiry.IDTokens)
+		if err != nil {
+			return fmt.Errorf("invalid config value %q for id token expiry: %v", c.Expiry.IDTokens, err)
+		}
+		logger.Infof("config id tokens valid for: %v", idTokens)
+		serverConfig.IDTokensValidFor = idTokens
+	}
+	if c.Expiry.AuthRequests != "" {
+		authRequests, err := time.ParseDuration(c.Expiry.AuthRequests)
+		if err != nil {
+			return fmt.Errorf("invalid config value %q for auth request expiry: %v", c.Expiry.AuthRequests, err)
+		}
+		logger.Infof("config auth requests valid for: %v", authRequests)
+		serverConfig.AuthRequestsValidFor = authRequests
+	}
+	if c.Expiry.DeviceRequests != "" {
+		deviceRequests, err := time.ParseDuration(c.Expiry.DeviceRequests)
+		if err != nil {
+			return fmt.Errorf("invalid config value %q for device request expiry: %v", c.Expiry.DeviceRequests, err)
+		}
+		logger.Infof("config device requests valid for: %v", deviceRequests)
+		serverConfig.DeviceRequestsValidFor = deviceRequests
+	}
+	refreshTokenPolicy, err := server.NewRefreshTokenPolicy(
+		logger,
c.Expiry.RefreshTokens.DisableRotation, + c.Expiry.RefreshTokens.ValidIfNotUsedFor, + c.Expiry.RefreshTokens.AbsoluteLifetime, + c.Expiry.RefreshTokens.ReuseInterval, + ) + if err != nil { + return fmt.Errorf("invalid refresh token expiration policy config: %v", err) + } + + serverConfig.RefreshTokenPolicy = refreshTokenPolicy + serv, err := server.NewServer(context.Background(), serverConfig) + if err != nil { + return fmt.Errorf("failed to initialize server: %v", err) + } + + telemetryRouter := http.NewServeMux() + telemetryRouter.Handle("/metrics", promhttp.HandlerFor(prometheusRegistry, promhttp.HandlerOpts{})) + + // Configure health checker + { + handler := gosundheithttp.HandleHealthJSON(healthChecker) + telemetryRouter.Handle("/healthz", handler) + + // Kubernetes style health checks + telemetryRouter.HandleFunc("/healthz/live", func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte("ok")) + }) + telemetryRouter.Handle("/healthz/ready", handler) + } + + healthChecker.RegisterCheck( + &checks.CustomCheck{ + CheckName: "storage", + CheckFunc: storage.NewCustomHealthCheckFunc(serverConfig.Storage, serverConfig.Now), + }, + gosundheit.ExecutionPeriod(15*time.Second), + gosundheit.InitiallyPassing(true), + ) + + var group run.Group + + // Set up telemetry server + if c.Telemetry.HTTP != "" { + const name = "telemetry" + + logger.Infof("listening (%s) on %s", name, c.Telemetry.HTTP) + + l, err := net.Listen("tcp", c.Telemetry.HTTP) + if err != nil { + return fmt.Errorf("listening (%s) on %s: %v", name, c.Telemetry.HTTP, err) + } + + if c.Telemetry.EnableProfiling { + pprofHandler(telemetryRouter) + } + + server := &http.Server{ + Handler: telemetryRouter, + } + defer server.Close() + + group.Add(func() error { + return server.Serve(l) + }, func(err error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + logger.Debugf("starting graceful shutdown (%s)", name) + if err := server.Shutdown(ctx); err != nil { + logger.Errorf("graceful shutdown (%s): %v", name, err) + } + }) + } + + // Set up http server + if c.Web.HTTP != "" { + const name = "http" + + logger.Infof("listening (%s) on %s", name, c.Web.HTTP) + + l, err := net.Listen("tcp", c.Web.HTTP) + if err != nil { + return fmt.Errorf("listening (%s) on %s: %v", name, c.Web.HTTP, err) + } + + server := &http.Server{ + Handler: serv, + } + defer server.Close() + + group.Add(func() error { + return server.Serve(l) + }, func(err error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + logger.Debugf("starting graceful shutdown (%s)", name) + if err := server.Shutdown(ctx); err != nil { + logger.Errorf("graceful shutdown (%s): %v", name, err) + } + }) + } + + // Set up https server + if c.Web.HTTPS != "" { + const name = "https" + + logger.Infof("listening (%s) on %s", name, c.Web.HTTPS) + + l, err := net.Listen("tcp", c.Web.HTTPS) + if err != nil { + return fmt.Errorf("listening (%s) on %s: %v", name, c.Web.HTTPS, err) + } + + baseTLSConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + CipherSuites: allowedTLSCiphers, + PreferServerCipherSuites: true, + } + + tlsConfig, err := newTLSReloader(logger, c.Web.TLSCert, c.Web.TLSKey, "", baseTLSConfig) + if err != nil { + return fmt.Errorf("invalid config: get HTTP TLS: %v", err) + } + + server := &http.Server{ + Handler: serv, + TLSConfig: tlsConfig, + } + defer server.Close() + + group.Add(func() error { + return server.ServeTLS(l, "", "") + }, func(err error) { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Minute)
+			defer cancel()
+
+			logger.Debugf("starting graceful shutdown (%s)", name)
+			if err := server.Shutdown(ctx); err != nil {
+				logger.Errorf("graceful shutdown (%s): %v", name, err)
+			}
+		})
+	}
+
+	// Set up grpc server
+	if c.GRPC.Addr != "" {
+		logger.Infof("listening (grpc) on %s", c.GRPC.Addr)
+
+		grpcListener, err := net.Listen("tcp", c.GRPC.Addr)
+		if err != nil {
+			return fmt.Errorf("listening (grpc) on %s: %w", c.GRPC.Addr, err)
+		}
+
+		grpcSrv := grpc.NewServer(grpcOptions...)
+		api.RegisterDexServer(grpcSrv, server.NewAPI(serverConfig.Storage, logger, version))
+
+		grpcMetrics.InitializeMetrics(grpcSrv)
+		if c.GRPC.Reflection {
+			logger.Info("enabling reflection in grpc service")
+			reflection.Register(grpcSrv)
+		}
+
+		group.Add(func() error {
+			return grpcSrv.Serve(grpcListener)
+		}, func(err error) {
+			logger.Debugf("starting graceful shutdown (grpc)")
+			grpcSrv.GracefulStop()
+		})
+	}
+
+	group.Add(run.SignalHandler(context.Background(), os.Interrupt, syscall.SIGTERM))
+	if err := group.Run(); err != nil {
+		if _, ok := err.(run.SignalError); !ok {
+			return fmt.Errorf("run groups: %w", err)
+		}
+		logger.Infof("%v, shutdown now", err)
+	}
+	return nil
+}
+
+var (
+	logLevels  = []string{"debug", "info", "error"}
+	logFormats = []string{"json", "text"}
+)
+
+type utcFormatter struct {
+	f logrus.Formatter
+}
+
+func (f *utcFormatter) Format(e *logrus.Entry) ([]byte, error) {
+	e.Time = e.Time.UTC()
+	return f.f.Format(e)
+}
+
+func newLogger(level string, format string) (log.Logger, error) {
+	var logLevel logrus.Level
+	switch strings.ToLower(level) {
+	case "debug":
+		logLevel = logrus.DebugLevel
+	case "", "info":
+		logLevel = logrus.InfoLevel
+	case "error":
+		logLevel = logrus.ErrorLevel
+	default:
+		return nil, fmt.Errorf("log level is not one of the supported values (%s): %s", strings.Join(logLevels, ", "), level)
+	}
+
+	var formatter utcFormatter
+	switch strings.ToLower(format) {
+	case "", "text":
+		formatter.f = &logrus.TextFormatter{DisableColors: true}
+	case "json":
+		formatter.f = &logrus.JSONFormatter{}
+	default:
+		return nil, fmt.Errorf("log format is not one of the supported values (%s): %s", strings.Join(logFormats, ", "), format)
+	}
+
+	return &logrus.Logger{
+		Out:       os.Stderr,
+		Formatter: &formatter,
+		Level:     logLevel,
+	}, nil
+}
+
+func applyConfigOverrides(options serveOptions, config *Config) {
+	if options.webHTTPAddr != "" {
+		config.Web.HTTP = options.webHTTPAddr
+	}
+
+	if options.webHTTPSAddr != "" {
+		config.Web.HTTPS = options.webHTTPSAddr
+	}
+
+	if options.telemetryAddr != "" {
+		config.Telemetry.HTTP = options.telemetryAddr
+	}
+
+	if options.grpcAddr != "" {
+		config.GRPC.Addr = options.grpcAddr
+	}
+
+	if config.Frontend.Dir == "" {
+		config.Frontend.Dir = os.Getenv("DEX_FRONTEND_DIR")
+	}
+
+	if len(config.OAuth2.GrantTypes) == 0 {
+		config.OAuth2.GrantTypes = []string{
+			"authorization_code",
+			"implicit",
+			"password",
+			"refresh_token",
+			"urn:ietf:params:oauth:grant-type:device_code",
+			"urn:ietf:params:oauth:grant-type:token-exchange",
+		}
+	}
+}
+
+func pprofHandler(router *http.ServeMux) {
+	router.HandleFunc("/debug/pprof/", pprof.Index)
+	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
+	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+	router.HandleFunc("/debug/pprof/trace", pprof.Trace)
+}
+
+// newTLSReloader returns a [tls.Config] with GetCertificate or GetConfigForClient set
+// to reload
certificates from the given paths on SIGHUP or on file creates (atomic update via rename).
+func newTLSReloader(logger log.Logger, certFile, keyFile, caFile string, baseConfig *tls.Config) (*tls.Config, error) {
+	// trigger reload on channel
+	sigc := make(chan os.Signal, 1)
+	signal.Notify(sigc, syscall.SIGHUP)
+
+	// files to watch
+	watchFiles := map[string]struct{}{
+		certFile: {},
+		keyFile:  {},
+	}
+	if caFile != "" {
+		watchFiles[caFile] = struct{}{}
+	}
+	watchDirs := make(map[string]struct{}) // dedupe dirs
+	for f := range watchFiles {
+		dir := filepath.Dir(f)
+		if !strings.HasPrefix(f, dir) {
+			// normalize name to have ./ prefix if only a local path was provided
+			// can't pass "" to watcher.Add
+			watchFiles[dir+string(filepath.Separator)+f] = struct{}{}
+		}
+		watchDirs[dir] = struct{}{}
+	}
+	// trigger reload on file change
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, fmt.Errorf("create watcher for TLS reloader: %v", err)
+	}
+	// recommended by fsnotify: watch the dir to handle renames
+	// https://pkg.go.dev/github.com/fsnotify/fsnotify#hdr-Watching_files
+	for dir := range watchDirs {
+		logger.Debugf("watching dir: %v", dir)
+		err := watcher.Add(dir)
+		if err != nil {
+			return nil, fmt.Errorf("watch dir for TLS reloader: %v", err)
+		}
+	}
+
+	// load once outside the goroutine so we can return an error on misconfig
+	initialConfig, err := loadTLSConfig(certFile, keyFile, caFile, baseConfig)
+	if err != nil {
+		return nil, fmt.Errorf("load TLS config: %v", err)
+	}
+
+	// stored version of current tls config
+	ptr := &atomic.Pointer[tls.Config]{}
+	ptr.Store(initialConfig)
+
+	// start background worker to reload certs
+	go func() {
+	loop:
+		for {
+			select {
+			case sig := <-sigc:
+				logger.Debugf("reloading cert from signal: %v", sig)
+			case evt := <-watcher.Events:
+				if _, ok := watchFiles[evt.Name]; !ok || !evt.Has(fsnotify.Create) {
+					continue loop
+				}
+				logger.Debugf("reloading cert from fsnotify: %v %v", evt.Name, evt.Op.String())
+			case err := <-watcher.Errors:
+				logger.Errorf("TLS reloader watch: %v", err)
+			}
+
+			loaded, err := loadTLSConfig(certFile, keyFile, caFile, baseConfig)
+			if err != nil {
+				// keep serving the last good config rather than storing a nil one
+				logger.Errorf("reload TLS config: %v", err)
+				continue
+			}
+			ptr.Store(loaded)
+		}
+	}()
+
+	conf := &tls.Config{}
+	// https://pkg.go.dev/crypto/tls#Config
+	// Server configurations must set one of Certificates, GetCertificate or GetConfigForClient.
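+	// GetConfigForClient swaps the whole config per handshake so that reloaded
+	// client CAs take effect for mTLS; GetCertificate is sufficient when only
+	// the serving certificate can change.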
+ if caFile != "" { + // grpc will use this via tls.Server for mTLS + conf.GetConfigForClient = func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return ptr.Load(), nil } + } else { + // net/http only uses Certificates or GetCertificate + conf.GetCertificate = func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { return &ptr.Load().Certificates[0], nil } + } + return conf, nil +} + +// loadTLSConfig loads the given file paths into a [tls.Config] +func loadTLSConfig(certFile, keyFile, caFile string, baseConfig *tls.Config) (*tls.Config, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, fmt.Errorf("loading TLS keypair: %v", err) + } + loadedConfig := baseConfig.Clone() // copy + loadedConfig.Certificates = []tls.Certificate{cert} + if caFile != "" { + cPool := x509.NewCertPool() + clientCert, err := os.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("reading from client CA file: %v", err) + } + if !cPool.AppendCertsFromPEM(clientCert) { + return nil, errors.New("failed to parse client CA") + } + + loadedConfig.ClientAuth = tls.RequireAndVerifyClientCert + loadedConfig.ClientCAs = cPool + } + return loadedConfig, nil +} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/version.go b/vendor/github.com/dexidp/dex/cmd/dex/version.go new file mode 100644 index 00000000..99bd318b --- /dev/null +++ b/vendor/github.com/dexidp/dex/cmd/dex/version.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" +) + +var version = "DEV" + +func commandVersion() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "Print the version and exit", + Run: func(_ *cobra.Command, _ []string) { + fmt.Printf( + "Dex Version: %s\nGo Version: %s\nGo OS/ARCH: %s %s\n", + version, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH, + ) + }, + } +} diff --git a/vendor/github.com/dexidp/dex/connector/BUILD b/vendor/github.com/dexidp/dex/connector/BUILD new file mode 100644 index 00000000..8bf4964f --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/BUILD @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "connector", + srcs = ["connector.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector", + importpath = "github.com/dexidp/dex/connector", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dexidp/dex/connector/atlassiancrowd/BUILD b/vendor/github.com/dexidp/dex/connector/atlassiancrowd/BUILD new file mode 100644 index 00000000..f62b7941 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/atlassiancrowd/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "atlassiancrowd", + srcs = ["atlassiancrowd.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/atlassiancrowd", + importpath = "github.com/dexidp/dex/connector/atlassiancrowd", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go b/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go new file mode 100644 index 00000000..aa142203 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go @@ -0,0 +1,448 @@ +// Package atlassiancrowd provides authentication strategies using Atlassian Crowd. 
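+//
+// The connector implements both connector.PasswordConnector and
+// connector.RefreshConnector: logins create a Crowd session to validate the
+// password, and refreshes re-create a session for the stored username.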
+package atlassiancrowd
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/dexidp/dex/connector"
+	"github.com/dexidp/dex/pkg/groups"
+	"github.com/dexidp/dex/pkg/log"
+)
+
+// Config holds configuration options for the Atlassian Crowd connector.
+// Crowd connectors require executing two queries: the first finds the user
+// based on the username and password given to the connector; the second uses
+// the user entry to search for groups.
+//
+// An example config:
+//
+//	type: atlassian-crowd
+//	config:
+//	  baseURL: https://crowd.example.com/context
+//	  clientID: applogin
+//	  clientSecret: appP4$$w0rd
+//	  # users can be restricted by a list of groups
+//	  groups:
+//	  - admin
+//	  # Prompt for username field
+//	  usernamePrompt: Login
+//	  preferredUsernameField: name
+type Config struct {
+	BaseURL      string   `json:"baseURL"`
+	ClientID     string   `json:"clientID"`
+	ClientSecret string   `json:"clientSecret"`
+	Groups       []string `json:"groups"`
+
+	// PreferredUsernameField allows users to set the field to any of the
+	// following values: "key", "name" or "email".
+	// If unset, the preferred_username field will remain empty.
+	PreferredUsernameField string `json:"preferredUsernameField"`
+
+	// UsernamePrompt allows users to override the username attribute (displayed
+	// in the username/password prompt). If unset, the handler will use
+	// "Username".
+	UsernamePrompt string `json:"usernamePrompt"`
+}
+
+type crowdUser struct {
+	Key    string
+	Name   string
+	Active bool
+	Email  string
+}
+
+type crowdGroups struct {
+	Groups []struct {
+		Name string
+	} `json:"groups"`
+}
+
+type crowdAuthentication struct {
+	Token string
+	User  struct {
+		Name string
+	} `json:"user"`
+	CreatedDate uint64 `json:"created-date"`
+	ExpiryDate  uint64 `json:"expiry-date"`
+}
+
+type crowdAuthenticationError struct {
+	Reason  string
+	Message string
+}
+
+// Open returns a strategy for logging in through Atlassian Crowd.
+func (c *Config) Open(_ string, logger log.Logger) (connector.Connector, error) {
+	if c.BaseURL == "" {
+		return nil, fmt.Errorf("crowd: no baseURL provided for crowd connector")
+	}
+	return &crowdConnector{Config: *c, logger: logger}, nil
+}
+
+type crowdConnector struct {
+	Config
+	logger log.Logger
+}
+
+var (
+	_ connector.PasswordConnector = (*crowdConnector)(nil)
+	_ connector.RefreshConnector  = (*crowdConnector)(nil)
+)
+
+type refreshData struct {
+	Username string `json:"username"`
+}
+
+func (c *crowdConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (ident connector.Identity, validPass bool, err error) {
+	// make this check to avoid empty passwords.
+	if password == "" {
+		return connector.Identity{}, false, nil
+	}
+
+	// We want to return a different error if the user's password is incorrect vs
+	// if there was an error.
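+	// Returning (connector.Identity{}, false, nil) signals invalid credentials
+	// to the caller without surfacing an internal error.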
+ var incorrectPass bool + var user crowdUser + + client := c.crowdAPIClient() + + if incorrectPass, err = c.authenticateWithPassword(ctx, client, username, password); err != nil { + return connector.Identity{}, false, err + } + + if incorrectPass { + return connector.Identity{}, false, nil + } + + if user, err = c.user(ctx, client, username); err != nil { + return connector.Identity{}, false, err + } + + ident = c.identityFromCrowdUser(user) + if s.Groups { + userGroups, err := c.getGroups(ctx, client, s.Groups, ident.Username) + if err != nil { + return connector.Identity{}, false, fmt.Errorf("crowd: failed to query groups: %v", err) + } + ident.Groups = userGroups + } + + if s.OfflineAccess { + refresh := refreshData{Username: username} + // Encode entry for following up requests such as the groups query and refresh attempts. + if ident.ConnectorData, err = json.Marshal(refresh); err != nil { + return connector.Identity{}, false, fmt.Errorf("crowd: marshal refresh data: %v", err) + } + } + + return ident, true, nil +} + +func (c *crowdConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + var data refreshData + if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { + return ident, fmt.Errorf("crowd: failed to unmarshal internal data: %v", err) + } + + var user crowdUser + client := c.crowdAPIClient() + + user, err := c.user(ctx, client, data.Username) + if err != nil { + return ident, fmt.Errorf("crowd: get user %q: %v", data.Username, err) + } + + newIdent := c.identityFromCrowdUser(user) + newIdent.ConnectorData = ident.ConnectorData + + // If user exists, authenticate it to prolong sso session. + err = c.authenticateUser(ctx, client, data.Username) + if err != nil { + return ident, fmt.Errorf("crowd: authenticate user: %v", err) + } + + if s.Groups { + userGroups, err := c.getGroups(ctx, client, s.Groups, newIdent.Username) + if err != nil { + return connector.Identity{}, fmt.Errorf("crowd: failed to query groups: %v", err) + } + newIdent.Groups = userGroups + } + return newIdent, nil +} + +func (c *crowdConnector) Prompt() string { + return c.UsernamePrompt +} + +func (c *crowdConnector) crowdAPIClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + } +} + +// authenticateWithPassword creates a new session for user and validates a password with Crowd API +func (c *crowdConnector) authenticateWithPassword(ctx context.Context, client *http.Client, username string, password string) (invalidPass bool, err error) { + req, err := c.crowdUserManagementRequest(ctx, + "POST", + "/session", + struct { + Username string `json:"username"` + Password string `json:"password"` + }{Username: username, Password: password}, + ) + if err != nil { + return false, fmt.Errorf("crowd: new auth pass api request %v", err) + } + + resp, err := client.Do(req) + if err != nil { + return false, fmt.Errorf("crowd: api request %v", err) + } + defer resp.Body.Close() + + body, err := c.validateCrowdResponse(resp) + if err != nil { + return false, err + } + + if resp.StatusCode != http.StatusCreated { + var authError crowdAuthenticationError + if err := json.Unmarshal(body, &authError); err != nil { + return false, 
fmt.Errorf("unmarshal auth pass response: %d %v %q", resp.StatusCode, err, string(body)) + } + + if authError.Reason == "INVALID_USER_AUTHENTICATION" { + return true, nil + } + + return false, fmt.Errorf("%s: %s", resp.Status, authError.Message) + } + + var authResponse crowdAuthentication + + if err := json.Unmarshal(body, &authResponse); err != nil { + return false, fmt.Errorf("decode auth response: %v", err) + } + + return false, nil +} + +// authenticateUser creates a new session for user without password validations with Crowd API +func (c *crowdConnector) authenticateUser(ctx context.Context, client *http.Client, username string) error { + req, err := c.crowdUserManagementRequest(ctx, + "POST", + "/session?validate-password=false", + struct { + Username string `json:"username"` + }{Username: username}, + ) + if err != nil { + return fmt.Errorf("crowd: new auth api request %v", err) + } + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("crowd: api request %v", err) + } + defer resp.Body.Close() + + body, err := c.validateCrowdResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("%s: %s", resp.Status, body) + } + + var authResponse crowdAuthentication + + if err := json.Unmarshal(body, &authResponse); err != nil { + return fmt.Errorf("decode auth response: %v", err) + } + + return nil +} + +// user retrieves user info from Crowd API +func (c *crowdConnector) user(ctx context.Context, client *http.Client, username string) (crowdUser, error) { + var user crowdUser + + req, err := c.crowdUserManagementRequest(ctx, + "GET", + fmt.Sprintf("/user?username=%s", username), + nil, + ) + if err != nil { + return user, fmt.Errorf("crowd: new user api request %v", err) + } + + resp, err := client.Do(req) + if err != nil { + return user, fmt.Errorf("crowd: api request %v", err) + } + defer resp.Body.Close() + + body, err := c.validateCrowdResponse(resp) + if err != nil { + return user, err + } + + if resp.StatusCode != http.StatusOK { + return user, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.Unmarshal(body, &user); err != nil { + return user, fmt.Errorf("failed to decode response: %v", err) + } + + return user, nil +} + +// groups retrieves groups from Crowd API +func (c *crowdConnector) groups(ctx context.Context, client *http.Client, username string) (userGroups []string, err error) { + var crowdGroups crowdGroups + + req, err := c.crowdUserManagementRequest(ctx, + "GET", + fmt.Sprintf("/user/group/nested?username=%s", username), + nil, + ) + if err != nil { + return nil, fmt.Errorf("crowd: new groups api request %v", err) + } + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("crowd: api request %v", err) + } + defer resp.Body.Close() + + body, err := c.validateCrowdResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.Unmarshal(body, &crowdGroups); err != nil { + return nil, fmt.Errorf("failed to decode response: %v", err) + } + + for _, group := range crowdGroups.Groups { + userGroups = append(userGroups, group.Name) + } + + return userGroups, nil +} + +// identityFromCrowdUser converts crowdUser to Identity +func (c *crowdConnector) identityFromCrowdUser(user crowdUser) connector.Identity { + identity := connector.Identity{ + Username: user.Name, + UserID: user.Key, + Email: user.Email, + EmailVerified: true, + } + + switch c.PreferredUsernameField 
{
+	case "key":
+		identity.PreferredUsername = user.Key
+	case "name":
+		identity.PreferredUsername = user.Name
+	case "email":
+		identity.PreferredUsername = user.Email
+	default:
+		if c.PreferredUsernameField != "" {
+			c.logger.Warnf("preferred_username left empty. Invalid crowd field mapped to preferred_username: %s", c.PreferredUsernameField)
+		}
+	}
+
+	return identity
+}
+
+// getGroups retrieves a list of the user's groups and filters it.
+func (c *crowdConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) {
+	crowdGroups, err := c.groups(ctx, client, userLogin)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(c.Groups) > 0 {
+		filteredGroups := groups.Filter(crowdGroups, c.Groups)
+		if len(filteredGroups) == 0 {
+			return nil, fmt.Errorf("crowd: user %q is not in any of the required groups", userLogin)
+		}
+		return filteredGroups, nil
+	} else if groupScope {
+		return crowdGroups, nil
+	}
+
+	return nil, nil
+}
+
+// crowdUserManagementRequest creates an http.Request with basic auth, a JSON payload and an Accept header.
+func (c *crowdConnector) crowdUserManagementRequest(ctx context.Context, method string, apiURL string, jsonPayload interface{}) (*http.Request, error) {
+	var body io.Reader
+	if jsonPayload != nil {
+		jsonData, err := json.Marshal(jsonPayload)
+		if err != nil {
+			return nil, fmt.Errorf("crowd: marshal API json payload: %v", err)
+		}
+		body = bytes.NewReader(jsonData)
+	}
+
+	req, err := http.NewRequest(method, fmt.Sprintf("%s/rest/usermanagement/1%s", c.BaseURL, apiURL), body)
+	if err != nil {
+		return nil, fmt.Errorf("new API req: %v", err)
+	}
+	req = req.WithContext(ctx)
+
+	// Crowd API requires basic auth
+	req.SetBasicAuth(c.ClientID, c.ClientSecret)
+	req.Header.Set("Accept", "application/json")
+	if jsonPayload != nil {
+		req.Header.Set("Content-type", "application/json")
+	}
+	return req, nil
+}
+
+// validateCrowdResponse validates the handful of known non-JSON responses from the API.
+func (c *crowdConnector) validateCrowdResponse(resp *http.Response) ([]byte, error) {
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("crowd: read user body: %v", err)
+	}
+
+	if resp.StatusCode == http.StatusForbidden && strings.Contains(string(body), "The server understood the request but refuses to authorize it.") {
+		c.logger.Debugf("crowd response validation failed: %s", string(body))
+		return nil, fmt.Errorf("dex is forbidden from making requests to the Atlassian Crowd application by URL %q", c.BaseURL)
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized && string(body) == "Application failed to authenticate" {
+		c.logger.Debugf("crowd response validation failed: %s", string(body))
+		return nil, fmt.Errorf("dex failed to authenticate Crowd Application with ID %q", c.ClientID)
+	}
+	return body, nil
+}
diff --git a/vendor/github.com/dexidp/dex/connector/authproxy/BUILD b/vendor/github.com/dexidp/dex/connector/authproxy/BUILD
new file mode 100644
index 00000000..0326bac2
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/authproxy/BUILD
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "authproxy",
+    srcs = ["authproxy.go"],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/authproxy",
+    importpath = "github.com/dexidp/dex/connector/authproxy",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/dexidp/dex/connector",
+        "//vendor/github.com/dexidp/dex/pkg/log",
+    ],
+)
diff --git
a/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go b/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go new file mode 100644 index 00000000..87154121 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go @@ -0,0 +1,85 @@ +// Package authproxy implements a connector which relies on external +// authentication (e.g. mod_auth in Apache2) and returns an identity with the +// HTTP header X-Remote-User as verified email. +package authproxy + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/log" +) + +// Config holds the configuration parameters for a connector which returns an +// identity with the HTTP header X-Remote-User as verified email, +// X-Remote-Group and configured staticGroups as user's group. +// Headers retrieved to fetch user's email and group can be configured +// with userHeader and groupHeader. +type Config struct { + UserHeader string `json:"userHeader"` + GroupHeader string `json:"groupHeader"` + Groups []string `json:"staticGroups"` +} + +// Open returns an authentication strategy which requires no user interaction. +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + userHeader := c.UserHeader + if userHeader == "" { + userHeader = "X-Remote-User" + } + groupHeader := c.GroupHeader + if groupHeader == "" { + groupHeader = "X-Remote-Group" + } + + return &callback{userHeader: userHeader, groupHeader: groupHeader, logger: logger, pathSuffix: "/" + id, groups: c.Groups}, nil +} + +// Callback is a connector which returns an identity with the HTTP header +// X-Remote-User as verified email. +type callback struct { + userHeader string + groupHeader string + groups []string + logger log.Logger + pathSuffix string +} + +// LoginURL returns the URL to redirect the user to login with. +func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { + u, err := url.Parse(callbackURL) + if err != nil { + return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) + } + u.Path += m.pathSuffix + v := u.Query() + v.Set("state", state) + u.RawQuery = v.Encode() + return u.String(), nil +} + +// HandleCallback parses the request and returns the user's identity +func (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) { + remoteUser := r.Header.Get(m.userHeader) + if remoteUser == "" { + return connector.Identity{}, fmt.Errorf("required HTTP header %s is not set", m.userHeader) + } + groups := m.groups + headerGroup := r.Header.Get(m.groupHeader) + if headerGroup != "" { + splitheaderGroup := strings.Split(headerGroup, ",") + for i, v := range splitheaderGroup { + splitheaderGroup[i] = strings.TrimSpace(v) + } + groups = append(splitheaderGroup, groups...) + } + return connector.Identity{ + UserID: remoteUser, // TODO: figure out if this is a bad ID value. 
+ Email: remoteUser, + EmailVerified: true, + Groups: groups, + }, nil +} diff --git a/vendor/github.com/dexidp/dex/connector/bitbucketcloud/BUILD b/vendor/github.com/dexidp/dex/connector/bitbucketcloud/BUILD new file mode 100644 index 00000000..f8b20177 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/bitbucketcloud/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "bitbucketcloud", + srcs = ["bitbucketcloud.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/bitbucketcloud", + importpath = "github.com/dexidp/dex/connector/bitbucketcloud", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + "@org_golang_x_oauth2//bitbucket", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go b/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go new file mode 100644 index 00000000..27eafb52 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go @@ -0,0 +1,468 @@ +// Package bitbucketcloud provides authentication strategies using Bitbucket Cloud. +package bitbucketcloud + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/bitbucket" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/log" +) + +const ( + apiURL = "https://api.bitbucket.org/2.0" + // Switch to API v2.0 when the Atlassian platform services are fully available in Bitbucket + legacyAPIURL = "https://api.bitbucket.org/1.0" + // Bitbucket requires this scope to access '/user' API endpoints. + scopeAccount = "account" + // Bitbucket requires this scope to access '/user/emails' API endpoints. + scopeEmail = "email" + // Bitbucket requires this scope to access '/teams' API endpoints + // which are used when a client includes the 'groups' scope. + scopeTeams = "team" +) + +// Config holds configuration options for Bitbucket logins. +type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Teams []string `json:"teams"` + IncludeTeamGroups bool `json:"includeTeamGroups,omitempty"` +} + +// Open returns a strategy for logging in through Bitbucket. +func (c *Config) Open(_ string, logger log.Logger) (connector.Connector, error) { + b := bitbucketConnector{ + redirectURI: c.RedirectURI, + teams: c.Teams, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + includeTeamGroups: c.IncludeTeamGroups, + apiURL: apiURL, + legacyAPIURL: legacyAPIURL, + logger: logger, + } + + return &b, nil +} + +type connectorData struct { + AccessToken string `json:"accessToken"` + RefreshToken string `json:"refreshToken"` + Expiry time.Time `json:"expiry"` +} + +var ( + _ connector.CallbackConnector = (*bitbucketConnector)(nil) + _ connector.RefreshConnector = (*bitbucketConnector)(nil) +) + +type bitbucketConnector struct { + redirectURI string + teams []string + clientID string + clientSecret string + logger log.Logger + apiURL string + legacyAPIURL string + + // the following are used only for tests + hostName string + httpClient *http.Client + + includeTeamGroups bool +} + +// groupsRequired returns whether dex requires Bitbucket's 'team' scope. 
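+// The scope is needed when the config restricts logins to a set of teams or
+// when the client itself requested group claims.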
+func (b *bitbucketConnector) groupsRequired(groupScope bool) bool { + return len(b.teams) > 0 || groupScope +} + +func (b *bitbucketConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { + bitbucketScopes := []string{scopeAccount, scopeEmail} + if b.groupsRequired(scopes.Groups) { + bitbucketScopes = append(bitbucketScopes, scopeTeams) + } + + endpoint := bitbucket.Endpoint + if b.hostName != "" { + endpoint = oauth2.Endpoint{ + AuthURL: "https://" + b.hostName + "/site/oauth2/authorize", + TokenURL: "https://" + b.hostName + "/site/oauth2/access_token", + } + } + + return &oauth2.Config{ + ClientID: b.clientID, + ClientSecret: b.clientSecret, + Endpoint: endpoint, + Scopes: bitbucketScopes, + } +} + +func (b *bitbucketConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if b.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, b.redirectURI) + } + + return b.oauth2Config(scopes).AuthCodeURL(state), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +func (b *bitbucketConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + oauth2Config := b.oauth2Config(s) + + ctx := r.Context() + if b.httpClient != nil { + ctx = context.WithValue(r.Context(), oauth2.HTTPClient, b.httpClient) + } + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("bitbucket: failed to get token: %v", err) + } + + client := oauth2Config.Client(ctx, token) + + user, err := b.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("bitbucket: get user: %v", err) + } + + identity = connector.Identity{ + UserID: user.UUID, + Username: user.Username, + Email: user.Email, + EmailVerified: true, + } + + if b.groupsRequired(s.Groups) { + groups, err := b.getGroups(ctx, client, s.Groups, user.Username) + if err != nil { + return identity, err + } + identity.Groups = groups + } + + if s.OfflineAccess { + data := connectorData{ + AccessToken: token.AccessToken, + RefreshToken: token.RefreshToken, + Expiry: token.Expiry, + } + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("bitbucket: marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +// Refreshing tokens +// https://github.com/golang/oauth2/issues/84#issuecomment-332860871 +type tokenNotifyFunc func(*oauth2.Token) error + +// notifyRefreshTokenSource is essentially `oauth2.ReuseTokenSource` with `TokenNotifyFunc` added. +type notifyRefreshTokenSource struct { + new oauth2.TokenSource + mu sync.Mutex // guards t + t *oauth2.Token + f tokenNotifyFunc // called when token refreshed so new refresh token can be persisted +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
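+// The notify func is called with the fresh token so that a rotated refresh
+// token can be persisted in the identity's connector data.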
+func (s *notifyRefreshTokenSource) Token() (*oauth2.Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, s.f(t)
+}
+
+func (b *bitbucketConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) {
+	if len(identity.ConnectorData) == 0 {
+		return identity, errors.New("bitbucket: no upstream access token found")
+	}
+
+	var data connectorData
+	if err := json.Unmarshal(identity.ConnectorData, &data); err != nil {
+		return identity, fmt.Errorf("bitbucket: unmarshal access token: %v", err)
+	}
+
+	tok := &oauth2.Token{
+		AccessToken:  data.AccessToken,
+		RefreshToken: data.RefreshToken,
+		Expiry:       data.Expiry,
+	}
+
+	client := oauth2.NewClient(ctx, &notifyRefreshTokenSource{
+		new: b.oauth2Config(s).TokenSource(ctx, tok),
+		t:   tok,
+		f: func(tok *oauth2.Token) error {
+			data := connectorData{
+				AccessToken:  tok.AccessToken,
+				RefreshToken: tok.RefreshToken,
+				Expiry:       tok.Expiry,
+			}
+			connData, err := json.Marshal(data)
+			if err != nil {
+				return fmt.Errorf("bitbucket: marshal connector data: %v", err)
+			}
+			identity.ConnectorData = connData
+			return nil
+		},
+	})
+
+	user, err := b.user(ctx, client)
+	if err != nil {
+		return identity, fmt.Errorf("bitbucket: get user: %v", err)
+	}
+
+	identity.Username = user.Username
+	identity.Email = user.Email
+
+	if b.groupsRequired(s.Groups) {
+		groups, err := b.getGroups(ctx, client, s.Groups, user.Username)
+		if err != nil {
+			return identity, err
+		}
+		identity.Groups = groups
+	}
+
+	return identity, nil
+}
+
+// Bitbucket pagination wrapper
+type pagedResponse struct {
+	Size     int     `json:"size"`
+	Page     int     `json:"page"`
+	PageLen  int     `json:"pagelen"`
+	Next     *string `json:"next"`
+	Previous *string `json:"previous"`
+}
+
+// user holds Bitbucket user information (relevant to dex) as defined by
+// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user
+type user struct {
+	Username string `json:"username"`
+	UUID     string `json:"uuid"`
+	Email    string `json:"email"`
+}
+
+// user queries the Bitbucket API for profile information using the provided client.
+//
+// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package,
+// which inserts a bearer token as part of the request.
+func (b *bitbucketConnector) user(ctx context.Context, client *http.Client) (user, error) {
+	// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user
+	var (
+		u   user
+		err error
+	)
+
+	if err = get(ctx, client, b.apiURL+"/user", &u); err != nil {
+		return user{}, err
+	}
+
+	if u.Email, err = b.userEmail(ctx, client); err != nil {
+		return user{}, err
+	}
+
+	return u, nil
+}
+
+// userEmail holds Bitbucket user email information as defined by
+// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails
+type userEmail struct {
+	IsPrimary   bool   `json:"is_primary"`
+	IsConfirmed bool   `json:"is_confirmed"`
+	Email       string `json:"email"`
+}
+
+type userEmailResponse struct {
+	pagedResponse
+	Values []userEmail
+}
+
+// userEmail returns the user's primary, confirmed email.
+//
+// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package,
+// which inserts a bearer token as part of the request.
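+//
+// The endpoint is paginated; the loop follows the Next link until a primary,
+// confirmed address is found or the pages are exhausted.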
+func (b *bitbucketConnector) userEmail(ctx context.Context, client *http.Client) (string, error) {
+	apiURL := b.apiURL + "/user/emails"
+	for {
+		// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails
+		var response userEmailResponse
+
+		if err := get(ctx, client, apiURL, &response); err != nil {
+			return "", err
+		}
+
+		for _, email := range response.Values {
+			if email.IsConfirmed && email.IsPrimary {
+				return email.Email, nil
+			}
+		}
+
+		if response.Next == nil {
+			break
+		}
+
+		// Advance to the next page of results.
+		apiURL = *response.Next
+	}
+
+	return "", errors.New("bitbucket: user has no confirmed, primary email")
+}
+
+// getGroups retrieves Bitbucket teams a user is in, if any.
+func (b *bitbucketConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) {
+	bitbucketTeams, err := b.userWorkspaces(ctx, client)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(b.teams) > 0 {
+		filteredTeams := groups.Filter(bitbucketTeams, b.teams)
+		if len(filteredTeams) == 0 {
+			return nil, fmt.Errorf("bitbucket: user %q is not in any of the required teams", userLogin)
+		}
+		return filteredTeams, nil
+	} else if groupScope {
+		return bitbucketTeams, nil
+	}
+
+	return nil, nil
+}
+
+type workspaceSlug struct {
+	Slug string `json:"slug"`
+}
+
+type workspace struct {
+	Workspace workspaceSlug `json:"workspace"`
+}
+
+type userWorkspacesResponse struct {
+	pagedResponse
+	Values []workspace `json:"values"`
+}
+
+func (b *bitbucketConnector) userWorkspaces(ctx context.Context, client *http.Client) ([]string, error) {
+	var teams []string
+	apiURL := b.apiURL + "/user/permissions/workspaces"
+
+	for {
+		// https://developer.atlassian.com/cloud/bitbucket/rest/api-group-workspaces/#api-workspaces-get
+		var response userWorkspacesResponse
+
+		if err := get(ctx, client, apiURL, &response); err != nil {
+			return nil, fmt.Errorf("bitbucket: get user teams: %v", err)
+		}
+
+		for _, value := range response.Values {
+			teams = append(teams, value.Workspace.Slug)
+		}
+
+		if response.Next == nil {
+			break
+		}
+
+		// Advance to the next page of workspaces.
+		apiURL = *response.Next
+	}
+
+	if b.includeTeamGroups {
+		for _, team := range teams {
+			teamGroups, err := b.userTeamGroups(ctx, client, team)
+			if err != nil {
+				return nil, fmt.Errorf("bitbucket: %v", err)
+			}
+			teams = append(teams, teamGroups...)
+		}
+	}
+
+	return teams, nil
+}
+
+type group struct {
+	Slug string `json:"slug"`
+}
+
+func (b *bitbucketConnector) userTeamGroups(ctx context.Context, client *http.Client, teamName string) ([]string, error) {
+	apiURL := b.legacyAPIURL + "/groups/" + teamName
+
+	var response []group
+	if err := get(ctx, client, apiURL, &response); err != nil {
+		return nil, fmt.Errorf("get user team %q groups: %v", teamName, err)
+	}
+
+	teamGroups := make([]string, 0, len(response))
+	for _, group := range response {
+		teamGroups = append(teamGroups, teamName+"/"+group.Slug)
+	}
+
+	return teamGroups, nil
+}
+
+// get creates a "GET `apiURL`" request with context, sends the request using
+// the client, and decodes the resulting response body into v.
+// Any errors encountered when building requests, sending requests, and
+// reading and decoding response data are returned.
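+//
+// A typical paged call, mirroring the loops above (sketch only):
+//
+//	var page userEmailResponse
+//	if err := get(ctx, client, b.apiURL+"/user/emails", &page); err != nil {
+//		// handle error
+//	}
+//	if page.Next != nil {
+//		// fetch *page.Next for the following page
+//	}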
+func get(ctx context.Context, client *http.Client, apiURL string, v interface{}) error {
+	req, err := http.NewRequest("GET", apiURL, nil)
+	if err != nil {
+		return fmt.Errorf("bitbucket: new req: %v", err)
+	}
+	req = req.WithContext(ctx)
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("bitbucket: get URL %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		body, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return fmt.Errorf("bitbucket: read body: %s: %v", resp.Status, err)
+		}
+		return fmt.Errorf("%s: %s", resp.Status, body)
+	}
+
+	if err := json.NewDecoder(resp.Body).Decode(v); err != nil {
+		return fmt.Errorf("bitbucket: failed to decode response: %v", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/dexidp/dex/connector/connector.go b/vendor/github.com/dexidp/dex/connector/connector.go
new file mode 100644
index 00000000..d812390f
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/connector.go
@@ -0,0 +1,105 @@
+// Package connector defines interfaces for federated identity strategies.
+package connector
+
+import (
+	"context"
+	"net/http"
+)
+
+// Connector is a mechanism for federating login to a remote identity service.
+//
+// Implementations are expected to implement either the PasswordConnector or
+// CallbackConnector interface.
+type Connector interface{}
+
+// Scopes represents additional data requested by the clients about the end user.
+type Scopes struct {
+	// The client has requested a refresh token from the server.
+	OfflineAccess bool
+
+	// The client has requested group information about the end user.
+	Groups bool
+}
+
+// Identity represents the ID Token claims supported by the server.
+type Identity struct {
+	UserID            string
+	Username          string
+	PreferredUsername string
+	Email             string
+	EmailVerified     bool
+
+	Groups []string
+
+	// ConnectorData holds data used by the connector for subsequent requests after initial
+	// authentication, such as access tokens for upstream providers.
+	//
+	// This data is never shared with end users, OAuth clients, or through the API.
+	ConnectorData []byte
+}
+
+// PasswordConnector is an interface implemented by connectors which take a
+// username and password.
+// Prompt() is used to inform the handler what to display in the password
+// template. If this returns an empty string, it'll default to "Username".
+type PasswordConnector interface {
+	Prompt() string
+	Login(ctx context.Context, s Scopes, username, password string) (identity Identity, validPassword bool, err error)
+}
+
+// CallbackConnector is an interface implemented by connectors which use an OAuth
+// style redirect flow to determine user information.
+type CallbackConnector interface {
+	// The initial URL to redirect the user to.
+	//
+	// OAuth2 implementations should request different scopes from the upstream
+	// identity provider based on the scopes requested by the downstream client.
+	// For example, if the downstream client requests a refresh token from the
+	// server, the connector should also request a token from the provider.
+	//
+	// Many identity providers have arbitrary restrictions on refresh tokens. For
+	// example, Google only allows a single refresh token per client/user/scopes
+	// combination, and won't return a refresh token even if offline access is
+	// requested if one has already been issued. There's no good general answer
+	// for these kinds of restrictions, and they may require this package to
+	// become more aware of the global set of user/connector interactions.
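+	//
+	// As a concrete instance of this pattern, the GitHub connector in this
+	// repository widens its upstream scope set when the downstream client
+	// asks for groups (illustrative sketch of that logic):
+	//
+	//	scopes := []string{"user:email"}
+	//	if downstreamScopes.Groups {
+	//		scopes = append(scopes, "read:org")
+	//	}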
+ LoginURL(s Scopes, callbackURL, state string) (string, error) + + // Handle the callback to the server and return an identity. + HandleCallback(s Scopes, r *http.Request) (identity Identity, err error) +} + +// SAMLConnector represents SAML connectors which implement the HTTP POST binding. +// +// RelayState is handled by the server. +// +// See: https://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf +// "3.5 HTTP POST Binding" +type SAMLConnector interface { + // POSTData returns an encoded SAML request and SSO URL for the server to + // render a POST form with. + // + // POSTData should encode the provided request ID in the returned serialized + // SAML request. + POSTData(s Scopes, requestID string) (ssoURL, samlRequest string, err error) + + // HandlePOST decodes, verifies, and maps attributes from the SAML response. + // It passes the expected value of the "InResponseTo" response field, which + // the connector must ensure matches the response value. + // + // See: https://www.oasis-open.org/committees/download.php/35711/sstc-saml-core-errata-2.0-wd-06-diff.pdf + // "3.2.2 Complex Type StatusResponseType" + HandlePOST(s Scopes, samlResponse, inResponseTo string) (identity Identity, err error) +} + +// RefreshConnector is a connector that can update the client claims. +type RefreshConnector interface { + // Refresh is called when a client attempts to claim a refresh token. The + // connector should attempt to update the identity object to reflect any + // changes since the token was last refreshed. + Refresh(ctx context.Context, s Scopes, identity Identity) (Identity, error) +} + +type TokenIdentityConnector interface { + TokenIdentity(ctx context.Context, subjectTokenType, subjectToken string) (Identity, error) +} diff --git a/vendor/github.com/dexidp/dex/connector/gitea/BUILD b/vendor/github.com/dexidp/dex/connector/gitea/BUILD new file mode 100644 index 00000000..360af5cd --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/gitea/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gitea", + srcs = ["gitea.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/gitea", + importpath = "github.com/dexidp/dex/connector/gitea", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/gitea/gitea.go b/vendor/github.com/dexidp/dex/connector/gitea/gitea.go new file mode 100644 index 00000000..6b020994 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/gitea/gitea.go @@ -0,0 +1,424 @@ +// Package gitea provides authentication strategies using Gitea. +package gitea + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "sync" + "time" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/log" +) + +// Config holds configuration options for gitea logins. +type Config struct { + BaseURL string `json:"baseURL"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Orgs []Org `json:"orgs"` + LoadAllGroups bool `json:"loadAllGroups"` + UseLoginAsID bool `json:"useLoginAsID"` +} + +// Org holds org-team filters, in which teams are optional. +type Org struct { + // Organization name in gitea (not slug, full name). 
Only users in this gitea + // organization can authenticate. + Name string `json:"name"` + + // Names of teams in a gitea organization. A user will be able to + // authenticate if they are members of at least one of these teams. Users + // in the organization can authenticate if this field is omitted from the + // config file. + Teams []string `json:"teams,omitempty"` +} + +type giteaUser struct { + ID int `json:"id"` + Name string `json:"full_name"` + Username string `json:"login"` + Email string `json:"email"` + IsAdmin bool `json:"is_admin"` +} + +// Open returns a strategy for logging in through Gitea +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + if c.BaseURL == "" { + c.BaseURL = "https://gitea.com" + } + return &giteaConnector{ + baseURL: c.BaseURL, + redirectURI: c.RedirectURI, + orgs: c.Orgs, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + logger: logger, + loadAllGroups: c.LoadAllGroups, + useLoginAsID: c.UseLoginAsID, + }, nil +} + +type connectorData struct { + AccessToken string `json:"accessToken"` + RefreshToken string `json:"refreshToken"` + Expiry time.Time `json:"expiry"` +} + +var ( + _ connector.CallbackConnector = (*giteaConnector)(nil) + _ connector.RefreshConnector = (*giteaConnector)(nil) +) + +type giteaConnector struct { + baseURL string + redirectURI string + orgs []Org + clientID string + clientSecret string + logger log.Logger + httpClient *http.Client + // if set to true and no orgs are configured then connector loads all user claims (all orgs and team) + loadAllGroups bool + // if set to true will use the user's handle rather than their numeric id as the ID + useLoginAsID bool +} + +func (c *giteaConnector) oauth2Config(_ connector.Scopes) *oauth2.Config { + giteaEndpoint := oauth2.Endpoint{AuthURL: c.baseURL + "/login/oauth/authorize", TokenURL: c.baseURL + "/login/oauth/access_token"} + return &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: giteaEndpoint, + RedirectURL: c.redirectURI, + } +} + +func (c *giteaConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", c.redirectURI, callbackURL) + } + return c.oauth2Config(scopes).AuthCodeURL(state), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +func (c *giteaConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + oauth2Config := c.oauth2Config(s) + + ctx := r.Context() + if c.httpClient != nil { + ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + } + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("gitea: failed to get token: %v", err) + } + + client := oauth2Config.Client(ctx, token) + + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("gitea: get user: %v", err) + } + + username := user.Name + if username == "" { + username = user.Email + } + + identity = connector.Identity{ + UserID: strconv.Itoa(user.ID), + Username: username, + PreferredUsername: user.Username, + Email: user.Email, + 
EmailVerified: true,
+	}
+	if c.useLoginAsID {
+		identity.UserID = user.Username
+	}
+
+	// Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified.
+	if c.groupsRequired() {
+		groups, err := c.getGroups(ctx, client)
+		if err != nil {
+			return identity, err
+		}
+		identity.Groups = groups
+	}
+
+	if s.OfflineAccess {
+		data := connectorData{
+			AccessToken:  token.AccessToken,
+			RefreshToken: token.RefreshToken,
+			Expiry:       token.Expiry,
+		}
+		connData, err := json.Marshal(data)
+		if err != nil {
+			return identity, fmt.Errorf("gitea: marshal connector data: %v", err)
+		}
+		identity.ConnectorData = connData
+	}
+
+	return identity, nil
+}
+
+// Refreshing tokens
+// https://github.com/golang/oauth2/issues/84#issuecomment-332860871
+type tokenNotifyFunc func(*oauth2.Token) error
+
+// notifyRefreshTokenSource is essentially `oauth2.ReuseTokenSource` with `TokenNotifyFunc` added.
+type notifyRefreshTokenSource struct {
+	new oauth2.TokenSource
+	mu  sync.Mutex // guards t
+	t   *oauth2.Token
+	f   tokenNotifyFunc // called when token refreshed so new refresh token can be persisted
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using r.Context for HTTP client
+// information) and return the new one.
+func (s *notifyRefreshTokenSource) Token() (*oauth2.Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, s.f(t)
+}
+
+func (c *giteaConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) {
+	if len(ident.ConnectorData) == 0 {
+		return ident, errors.New("gitea: no upstream access token found")
+	}
+
+	var data connectorData
+	if err := json.Unmarshal(ident.ConnectorData, &data); err != nil {
+		return ident, fmt.Errorf("gitea: unmarshal access token: %v", err)
+	}
+
+	tok := &oauth2.Token{
+		AccessToken:  data.AccessToken,
+		RefreshToken: data.RefreshToken,
+		Expiry:       data.Expiry,
+	}
+
+	client := oauth2.NewClient(ctx, &notifyRefreshTokenSource{
+		new: c.oauth2Config(s).TokenSource(ctx, tok),
+		t:   tok,
+		f: func(tok *oauth2.Token) error {
+			data := connectorData{
+				AccessToken:  tok.AccessToken,
+				RefreshToken: tok.RefreshToken,
+				Expiry:       tok.Expiry,
+			}
+			connData, err := json.Marshal(data)
+			if err != nil {
+				return fmt.Errorf("gitea: marshal connector data: %v", err)
+			}
+			ident.ConnectorData = connData
+			return nil
+		},
+	})
+	user, err := c.user(ctx, client)
+	if err != nil {
+		return ident, fmt.Errorf("gitea: get user: %v", err)
+	}
+
+	username := user.Name
+	if username == "" {
+		username = user.Email
+	}
+	ident.Username = username
+	ident.PreferredUsername = user.Username
+	ident.Email = user.Email
+
+	// Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified.
+	if c.groupsRequired() {
+		groups, err := c.getGroups(ctx, client)
+		if err != nil {
+			return ident, err
+		}
+		ident.Groups = groups
+	}
+
+	return ident, nil
+}
+
+// getGroups retrieves Gitea orgs and teams a user is in, if any.
+func (c *giteaConnector) getGroups(ctx context.Context, client *http.Client) ([]string, error) {
+	if len(c.orgs) > 0 {
+		return c.groupsForOrgs(ctx, client)
+	} else if c.loadAllGroups {
+		return c.userGroups(ctx, client)
+	}
+	return nil, nil
+}
+
+// formatTeamName returns a unique team name.
+// Orgs might have the same team names. To make a team name unique, it is prefixed with the org name.
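+//
+// For example (illustrative values):
+//
+//	formatTeamName("acme", "owners") // "acme:owners"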
+func formatTeamName(org string, team string) string { + return fmt.Sprintf("%s:%s", org, team) +} + +// groupsForOrgs returns list of groups that user belongs to in approved list +func (c *giteaConnector) groupsForOrgs(ctx context.Context, client *http.Client) ([]string, error) { + groups, err := c.userGroups(ctx, client) + if err != nil { + return groups, err + } + + keys := make(map[string]bool) + for _, o := range c.orgs { + keys[o.Name] = true + if o.Teams != nil { + for _, t := range o.Teams { + keys[formatTeamName(o.Name, t)] = true + } + } + } + atLeastOne := false + filteredGroups := make([]string, 0) + for _, g := range groups { + if _, value := keys[g]; value { + filteredGroups = append(filteredGroups, g) + atLeastOne = true + } + } + + if !atLeastOne { + return []string{}, fmt.Errorf("gitea: User does not belong to any of the approved groups") + } + return filteredGroups, nil +} + +type organization struct { + ID int64 `json:"id"` + Name string `json:"username"` +} + +type team struct { + ID int64 `json:"id"` + Name string `json:"name"` + Organization *organization `json:"organization"` +} + +func (c *giteaConnector) userGroups(ctx context.Context, client *http.Client) ([]string, error) { + apiURL := c.baseURL + "/api/v1/user/teams" + groups := make([]string, 0) + page := 1 + limit := 20 + for { + var teams []team + req, err := http.NewRequest("GET", fmt.Sprintf("%s?page=%d&limit=%d", apiURL, page, limit), nil) + if err != nil { + return groups, fmt.Errorf("gitea: new req: %v", err) + } + + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return groups, fmt.Errorf("gitea: get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return groups, fmt.Errorf("gitea: read body: %v", err) + } + return groups, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(&teams); err != nil { + return groups, fmt.Errorf("failed to decode response: %v", err) + } + + if len(teams) == 0 { + break + } + + for _, t := range teams { + groups = append(groups, t.Organization.Name) + groups = append(groups, formatTeamName(t.Organization.Name, t.Name)) + } + + page++ + } + + // remove duplicate slice variables + keys := make(map[string]struct{}) + list := []string{} + for _, group := range groups { + if _, exists := keys[group]; !exists { + keys[group] = struct{}{} + list = append(list, group) + } + } + groups = list + return groups, nil +} + +// user queries the Gitea API for profile information using the provided client. The HTTP +// client is expected to be constructed by the golang.org/x/oauth2 package, which inserts +// a bearer token as part of the request. 
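+//
+// For reference, /api/v1/user returns roughly the following (abridged;
+// values are illustrative):
+//
+//	{"id": 42, "full_name": "Jane Doe", "login": "jane",
+//	 "email": "jane@example.com", "is_admin": false}
+//
+// which maps onto giteaUser via the JSON tags above.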
+func (c *giteaConnector) user(ctx context.Context, client *http.Client) (giteaUser, error) { + var u giteaUser + req, err := http.NewRequest("GET", c.baseURL+"/api/v1/user", nil) + if err != nil { + return u, fmt.Errorf("gitea: new req: %v", err) + } + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return u, fmt.Errorf("gitea: get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return u, fmt.Errorf("gitea: read body: %v", err) + } + return u, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { + return u, fmt.Errorf("failed to decode response: %v", err) + } + return u, nil +} + +// groupsRequired returns whether dex needs to request groups from Gitea. +func (c *giteaConnector) groupsRequired() bool { + return len(c.orgs) > 0 || c.loadAllGroups +} diff --git a/vendor/github.com/dexidp/dex/connector/github/BUILD b/vendor/github.com/dexidp/dex/connector/github/BUILD new file mode 100644 index 00000000..239ddc1c --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/github/BUILD @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "github", + srcs = ["github.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/github", + importpath = "github.com/dexidp/dex/connector/github", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/httpclient", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + "@org_golang_x_oauth2//github", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/github/github.go b/vendor/github.com/dexidp/dex/connector/github/github.go new file mode 100644 index 00000000..6cb0db09 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/github/github.go @@ -0,0 +1,733 @@ +// Package github provides authentication strategies using GitHub. +package github + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + "strings" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/github" + + "github.com/dexidp/dex/connector" + groups_pkg "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/httpclient" + "github.com/dexidp/dex/pkg/log" +) + +const ( + apiURL = "https://api.github.com" + // GitHub requires this scope to access '/user' and '/user/emails' API endpoints. + scopeEmail = "user:email" + // GitHub requires this scope to access '/user/teams' and '/orgs' API endpoints + // which are used when a client includes the 'groups' scope. + scopeOrgs = "read:org" +) + +// Pagination URL patterns +// https://developer.github.com/v3/#pagination +var ( + reNext = regexp.MustCompile("<([^>]+)>; rel=\"next\"") + reLast = regexp.MustCompile("<([^>]+)>; rel=\"last\"") +) + +// Config holds configuration options for github logins. 
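+//
+// In a dex config file this connector is typically declared like the
+// following (illustrative values; consult the dex documentation for the
+// authoritative schema):
+//
+//	connectors:
+//	- type: github
+//	  id: github
+//	  name: GitHub
+//	  config:
+//	    clientID: $GITHUB_CLIENT_ID
+//	    clientSecret: $GITHUB_CLIENT_SECRET
+//	    redirectURI: https://dex.example.com/callback
+//	    orgs:
+//	    - name: my-org
+//	      teams:
+//	      - my-team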
+type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Org string `json:"org"` + Orgs []Org `json:"orgs"` + HostName string `json:"hostName"` + RootCA string `json:"rootCA"` + TeamNameField string `json:"teamNameField"` + LoadAllGroups bool `json:"loadAllGroups"` + UseLoginAsID bool `json:"useLoginAsID"` + PreferredEmailDomain string `json:"preferredEmailDomain"` +} + +// Org holds org-team filters, in which teams are optional. +type Org struct { + // Organization name in github (not slug, full name). Only users in this github + // organization can authenticate. + Name string `json:"name"` + + // Names of teams in a github organization. A user will be able to + // authenticate if they are members of at least one of these teams. Users + // in the organization can authenticate if this field is omitted from the + // config file. + Teams []string `json:"teams,omitempty"` +} + +// Open returns a strategy for logging in through GitHub. +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + if c.Org != "" { + // Return error if both 'org' and 'orgs' fields are used. + if len(c.Orgs) > 0 { + return nil, errors.New("github: cannot use both 'org' and 'orgs' fields simultaneously") + } + logger.Warn("github: legacy field 'org' being used. Switch to the newer 'orgs' field structure") + } + + g := githubConnector{ + redirectURI: c.RedirectURI, + org: c.Org, + orgs: c.Orgs, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + apiURL: apiURL, + logger: logger, + useLoginAsID: c.UseLoginAsID, + preferredEmailDomain: c.PreferredEmailDomain, + } + + if c.HostName != "" { + // ensure this is a hostname and not a URL or path. + if strings.Contains(c.HostName, "/") { + return nil, errors.New("invalid hostname: hostname cannot contain `/`") + } + + g.hostName = c.HostName + g.apiURL = "https://" + c.HostName + "/api/v3" + } + + if c.RootCA != "" { + if c.HostName == "" { + return nil, errors.New("invalid connector config: Host name field required for a root certificate file") + } + g.rootCA = c.RootCA + + var err error + if g.httpClient, err = httpclient.NewHTTPClient([]string{g.rootCA}, false); err != nil { + return nil, fmt.Errorf("failed to create HTTP client: %v", err) + } + } + g.loadAllGroups = c.LoadAllGroups + + switch c.TeamNameField { + case "name", "slug", "both", "": + g.teamNameField = c.TeamNameField + default: + return nil, fmt.Errorf("invalid connector config: unsupported team name field value `%s`", c.TeamNameField) + } + + if c.PreferredEmailDomain != "" { + if strings.HasSuffix(c.PreferredEmailDomain, "*") { + return nil, errors.New("invalid PreferredEmailDomain: glob pattern cannot end with \"*\"") + } + } + + return &g, nil +} + +type connectorData struct { + // GitHub's OAuth2 tokens never expire. We don't need a refresh token. + AccessToken string `json:"accessToken"` +} + +var ( + _ connector.CallbackConnector = (*githubConnector)(nil) + _ connector.RefreshConnector = (*githubConnector)(nil) +) + +type githubConnector struct { + redirectURI string + org string + orgs []Org + clientID string + clientSecret string + logger log.Logger + // apiURL defaults to "https://api.github.com" + apiURL string + // hostName of the GitHub enterprise account. + hostName string + // Used to support untrusted/self-signed CA certs. + rootCA string + // HTTP Client that trusts the custom declared rootCA cert. 
+ httpClient *http.Client + // optional choice between 'name' (default) or 'slug' + teamNameField string + // if set to true and no orgs are configured then connector loads all user claims (all orgs and team) + loadAllGroups bool + // if set to true will use the user's handle rather than their numeric id as the ID + useLoginAsID bool + // the domain to be preferred among the user's emails. e.g. "github.com" + preferredEmailDomain string +} + +// groupsRequired returns whether dex requires GitHub's 'read:org' scope. Dex +// needs 'read:org' if 'orgs' or 'org' fields are populated in a config file. +// Clients can require 'groups' scope without setting 'orgs'/'org'. +func (c *githubConnector) groupsRequired(groupScope bool) bool { + return len(c.orgs) > 0 || c.org != "" || groupScope +} + +func (c *githubConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { + // 'read:org' scope is required by the GitHub API, and thus for dex to ensure + // a user is a member of orgs and teams provided in configs. + githubScopes := []string{scopeEmail} + if c.groupsRequired(scopes.Groups) { + githubScopes = append(githubScopes, scopeOrgs) + } + + endpoint := github.Endpoint + // case when it is a GitHub Enterprise account. + if c.hostName != "" { + endpoint = oauth2.Endpoint{ + AuthURL: "https://" + c.hostName + "/login/oauth/authorize", + TokenURL: "https://" + c.hostName + "/login/oauth/access_token", + } + } + + return &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: endpoint, + Scopes: githubScopes, + RedirectURL: c.redirectURI, + } +} + +func (c *githubConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + } + + return c.oauth2Config(scopes).AuthCodeURL(state), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +func (c *githubConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + oauth2Config := c.oauth2Config(s) + + ctx := r.Context() + // GitHub Enterprise account + if c.httpClient != nil { + ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + } + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("github: failed to get token: %v", err) + } + + client := oauth2Config.Client(ctx, token) + + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("github: get user: %v", err) + } + + username := user.Name + if username == "" { + username = user.Login + } + + identity = connector.Identity{ + UserID: strconv.Itoa(user.ID), + Username: username, + PreferredUsername: user.Login, + Email: user.Email, + EmailVerified: true, + } + if c.useLoginAsID { + identity.UserID = user.Login + } + + // Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified. 
+ if c.groupsRequired(s.Groups) { + groups, err := c.getGroups(ctx, client, s.Groups, user.Login) + if err != nil { + return identity, err + } + identity.Groups = groups + } + + if s.OfflineAccess { + data := connectorData{AccessToken: token.AccessToken} + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +func (c *githubConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + if len(identity.ConnectorData) == 0 { + return identity, errors.New("no upstream access token found") + } + + var data connectorData + if err := json.Unmarshal(identity.ConnectorData, &data); err != nil { + return identity, fmt.Errorf("github: unmarshal access token: %v", err) + } + + client := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken}) + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("github: get user: %v", err) + } + + username := user.Name + if username == "" { + username = user.Login + } + identity.Username = username + identity.PreferredUsername = user.Login + identity.Email = user.Email + + // Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified. + if c.groupsRequired(s.Groups) { + groups, err := c.getGroups(ctx, client, s.Groups, user.Login) + if err != nil { + return identity, err + } + identity.Groups = groups + } + + return identity, nil +} + +// getGroups retrieves GitHub orgs and teams a user is in, if any. +func (c *githubConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { + switch { + case len(c.orgs) > 0: + return c.groupsForOrgs(ctx, client, userLogin) + case c.org != "": + return c.teamsForOrg(ctx, client, c.org) + case groupScope && c.loadAllGroups: + return c.userGroups(ctx, client) + } + return nil, nil +} + +// formatTeamName returns unique team name. +// Orgs might have the same team names. To make team name unique it should be prefixed with the org name. +func formatTeamName(org string, team string) string { + return fmt.Sprintf("%s:%s", org, team) +} + +// groupsForOrgs enforces org and team constraints on user authorization +// Cases in which user is authorized: +// +// N orgs, no teams: user is member of at least 1 org +// N orgs, M teams per org: user is member of any team from at least 1 org +// N-1 orgs, M teams per org, 1 org with no teams: user is member of any team +// +// from at least 1 org, or member of org with no teams +func (c *githubConnector) groupsForOrgs(ctx context.Context, client *http.Client, userName string) ([]string, error) { + groups := make([]string, 0) + var inOrgNoTeams bool + for _, org := range c.orgs { + inOrg, err := c.userInOrg(ctx, client, userName, org.Name) + if err != nil { + return nil, err + } + if !inOrg { + continue + } + + teams, err := c.teamsForOrg(ctx, client, org.Name) + if err != nil { + return nil, err + } + // User is in at least one org. User is authorized if no teams are specified + // in config; include all teams in claim. Otherwise filter out teams not in + // 'teams' list in config. 
+ if len(org.Teams) == 0 { + inOrgNoTeams = true + } else if teams = groups_pkg.Filter(teams, org.Teams); len(teams) == 0 { + c.logger.Infof("github: user %q in org %q but no teams", userName, org.Name) + } + + for _, teamName := range teams { + groups = append(groups, formatTeamName(org.Name, teamName)) + } + } + if inOrgNoTeams || len(groups) > 0 { + return groups, nil + } + return groups, fmt.Errorf("github: user %q not in required orgs or teams", userName) +} + +func (c *githubConnector) userGroups(ctx context.Context, client *http.Client) ([]string, error) { + orgs, err := c.userOrgs(ctx, client) + if err != nil { + return nil, err + } + + orgTeams, err := c.userOrgTeams(ctx, client) + if err != nil { + return nil, err + } + + groups := make([]string, 0) + for _, o := range orgs { + groups = append(groups, o) + if teams, ok := orgTeams[o]; ok { + for _, t := range teams { + groups = append(groups, formatTeamName(o, t)) + } + } + } + + return groups, nil +} + +// userOrgs retrieves list of current user orgs +func (c *githubConnector) userOrgs(ctx context.Context, client *http.Client) ([]string, error) { + groups := make([]string, 0) + apiURL := c.apiURL + "/user/orgs" + for { + // https://developer.github.com/v3/orgs/#list-your-organizations + var ( + orgs []org + err error + ) + if apiURL, err = get(ctx, client, apiURL, &orgs); err != nil { + return nil, fmt.Errorf("github: get orgs: %v", err) + } + + for _, o := range orgs { + groups = append(groups, o.Login) + } + + if apiURL == "" { + break + } + } + + return groups, nil +} + +// userOrgTeams retrieves teams which current user belongs to. +// Method returns a map where key is an org name and value list of teams under the org. +func (c *githubConnector) userOrgTeams(ctx context.Context, client *http.Client) (map[string][]string, error) { + groups := make(map[string][]string) + apiURL := c.apiURL + "/user/teams" + for { + // https://developer.github.com/v3/orgs/teams/#list-user-teams + var ( + teams []team + err error + ) + if apiURL, err = get(ctx, client, apiURL, &teams); err != nil { + return nil, fmt.Errorf("github: get teams: %v", err) + } + + for _, t := range teams { + groups[t.Org.Login] = append(groups[t.Org.Login], c.teamGroupClaims(t)...) + } + + if apiURL == "" { + break + } + } + + return groups, nil +} + +// get creates a "GET `apiURL`" request with context, sends the request using +// the client, and decodes the resulting response body into v. A pagination URL +// is returned if one exists. Any errors encountered when building requests, +// sending requests, and reading and decoding response data are returned. 
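+//
+// Callers loop until the returned pagination URL is empty (sketch mirroring
+// userOrgs above):
+//
+//	apiURL := c.apiURL + "/user/orgs"
+//	for apiURL != "" {
+//		var (
+//			orgs []org
+//			err  error
+//		)
+//		if apiURL, err = get(ctx, client, apiURL, &orgs); err != nil {
+//			return nil, err
+//		}
+//		// accumulate results from orgs here
+//	}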
+func get(ctx context.Context, client *http.Client, apiURL string, v interface{}) (string, error) { + req, err := http.NewRequest("GET", apiURL, nil) + if err != nil { + return "", fmt.Errorf("github: new req: %v", err) + } + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("github: get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("github: read body: %v", err) + } + return "", fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(v); err != nil { + return "", fmt.Errorf("failed to decode response: %v", err) + } + + return getPagination(apiURL, resp), nil +} + +// getPagination checks the "Link" header field for "next" or "last" pagination URLs, +// and returns "next" page URL or empty string to indicate that there are no more pages. +// Non empty next pages' URL is returned if both "last" and "next" URLs are found and next page +// URL is not equal to last. +// +// https://developer.github.com/v3/#pagination +func getPagination(apiURL string, resp *http.Response) string { + if resp == nil { + return "" + } + + links := resp.Header.Get("Link") + if len(reLast.FindStringSubmatch(links)) > 1 { + lastPageURL := reLast.FindStringSubmatch(links)[1] + if apiURL == lastPageURL { + return "" + } + } else { + return "" + } + + if len(reNext.FindStringSubmatch(links)) > 1 { + return reNext.FindStringSubmatch(links)[1] + } + + return "" +} + +// user holds GitHub user information (relevant to dex) as defined by +// https://developer.github.com/v3/users/#response-with-public-profile-information +type user struct { + Name string `json:"name"` + Login string `json:"login"` + ID int `json:"id"` + Email string `json:"email"` +} + +// user queries the GitHub API for profile information using the provided client. +// +// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, +// which inserts a bearer token as part of the request. +func (c *githubConnector) user(ctx context.Context, client *http.Client) (user, error) { + // https://developer.github.com/v3/users/#get-the-authenticated-user + var u user + if _, err := get(ctx, client, c.apiURL+"/user", &u); err != nil { + return u, err + } + + // Only public user emails are returned by 'GET /user'. u.Email will be empty + // if a users' email is private. We must retrieve private emails explicitly. + if u.Email == "" { + var err error + if u.Email, err = c.userEmail(ctx, client); err != nil { + return u, err + } + } + return u, nil +} + +// userEmail holds GitHub user email information as defined by +// https://developer.github.com/v3/users/emails/#response +type userEmail struct { + Email string `json:"email"` + Verified bool `json:"verified"` + Primary bool `json:"primary"` + Visibility string `json:"visibility"` +} + +// userEmail queries the GitHub API for a users' email information using the +// provided client. Only returns the users' verified, primary email (private or +// public). +// +// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, +// which inserts a bearer token as part of the request. 
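+//
+// For reference, one page of /user/emails looks roughly like this (abridged,
+// illustrative values):
+//
+//	[
+//	  {"email": "jane@example.com", "verified": true, "primary": true,
+//	   "visibility": "public"}
+//	]
+//
+// Pagination is carried in the "Link" response header, which get above
+// resolves via getPagination.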
+func (c *githubConnector) userEmail(ctx context.Context, client *http.Client) (string, error) { + var ( + primaryEmail userEmail + preferredEmails []userEmail + ) + + apiURL := c.apiURL + "/user/emails" + + for { + // https://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user + var ( + emails []userEmail + err error + ) + if apiURL, err = get(ctx, client, apiURL, &emails); err != nil { + return "", err + } + + for _, email := range emails { + /* + if GitHub Enterprise, set email.Verified to true + This change being made because GitHub Enterprise does not + support email verification. CircleCI indicated that GitHub + advised them not to check for verified emails + (https://circleci.com/enterprise/changelog/#1-47-1). + In addition, GitHub Enterprise support replied to a support + ticket with "There is no way to verify an email address in + GitHub Enterprise." + */ + if c.hostName != "" { + email.Verified = true + } + + if email.Verified && email.Primary { + primaryEmail = email + } + + if c.preferredEmailDomain != "" { + _, domainPart, ok := strings.Cut(email.Email, "@") + if !ok { + return "", errors.New("github: invalid format email is detected") + } + if email.Verified && c.isPreferredEmailDomain(domainPart) { + preferredEmails = append(preferredEmails, email) + } + } + } + + if apiURL == "" { + break + } + } + + if len(preferredEmails) > 0 { + return preferredEmails[0].Email, nil + } + + if primaryEmail.Email != "" { + return primaryEmail.Email, nil + } + + return "", errors.New("github: user has no verified, primary email or preferred-domain email") +} + +// isPreferredEmailDomain checks the domain is matching with preferredEmailDomain. +func (c *githubConnector) isPreferredEmailDomain(domain string) bool { + if domain == c.preferredEmailDomain { + return true + } + + preferredDomainParts := strings.Split(c.preferredEmailDomain, ".") + domainParts := strings.Split(domain, ".") + + if len(preferredDomainParts) != len(domainParts) { + return false + } + + for i, v := range preferredDomainParts { + if domainParts[i] != v && v != "*" { + return false + } + } + return true +} + +// userInOrg queries the GitHub API for a users' org membership. +// +// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, +// which inserts a bearer token as part of the request. 
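+//
+// The membership endpoint answers with a status code rather than a body
+// (summary of the flow handled below):
+//
+//	GET /orgs/{org}/members/{username}
+//	204      -> user is a member
+//	302/404  -> not a member, or the app is not authorized to read org data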
+func (c *githubConnector) userInOrg(ctx context.Context, client *http.Client, userName, orgName string) (bool, error) { + // requester == user, so GET-ing this endpoint should return 404/302 if user + // is not a member + // + // https://developer.github.com/v3/orgs/members/#check-membership + apiURL := fmt.Sprintf("%s/orgs/%s/members/%s", c.apiURL, orgName, userName) + + req, err := http.NewRequest("GET", apiURL, nil) + if err != nil { + return false, fmt.Errorf("github: new req: %v", err) + } + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return false, fmt.Errorf("github: get teams: %v", err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusNoContent: + case http.StatusFound, http.StatusNotFound: + c.logger.Infof("github: user %q not in org %q or application not authorized to read org data", userName, orgName) + default: + err = fmt.Errorf("github: unexpected return status: %q", resp.Status) + } + + // 204 if user is a member + return resp.StatusCode == http.StatusNoContent, err +} + +// teams holds GitHub a users' team information as defined by +// https://developer.github.com/v3/orgs/teams/#response-12 +type team struct { + Name string `json:"name"` + Org org `json:"organization"` + Slug string `json:"slug"` +} + +type org struct { + Login string `json:"login"` +} + +// teamsForOrg queries the GitHub API for team membership within a specific organization. +// +// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, +// which inserts a bearer token as part of the request. +func (c *githubConnector) teamsForOrg(ctx context.Context, client *http.Client, orgName string) ([]string, error) { + apiURL, groups := c.apiURL+"/user/teams", []string{} + for { + // https://developer.github.com/v3/orgs/teams/#list-user-teams + var ( + teams []team + err error + ) + if apiURL, err = get(ctx, client, apiURL, &teams); err != nil { + return nil, fmt.Errorf("github: get teams: %v", err) + } + + for _, t := range teams { + if t.Org.Login == orgName { + groups = append(groups, c.teamGroupClaims(t)...) + } + } + + if apiURL == "" { + break + } + } + + return groups, nil +} + +// teamGroupClaims returns team slug if 'teamNameField' option is set to +// 'slug', returns the slug *and* name if set to 'both', otherwise returns team +// name. 
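+//
+// For example, given team{Name: "Dev Team", Slug: "dev-team"} (illustrative):
+//
+//	teamNameField "name" (default) -> ["Dev Team"]
+//	teamNameField "slug"           -> ["dev-team"]
+//	teamNameField "both"           -> ["Dev Team", "dev-team"]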
+func (c *githubConnector) teamGroupClaims(t team) []string { + switch c.teamNameField { + case "both": + return []string{t.Name, t.Slug} + case "slug": + return []string{t.Slug} + default: + return []string{t.Name} + } +} diff --git a/vendor/github.com/dexidp/dex/connector/gitlab/BUILD b/vendor/github.com/dexidp/dex/connector/gitlab/BUILD new file mode 100644 index 00000000..f9457798 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/gitlab/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gitlab", + srcs = ["gitlab.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/gitlab", + importpath = "github.com/dexidp/dex/connector/gitlab", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go b/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go new file mode 100644 index 00000000..099cd2ef --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go @@ -0,0 +1,310 @@ +// Package gitlab provides authentication strategies using GitLab. +package gitlab + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "time" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/log" +) + +const ( + // read operations of the /api/v4/user endpoint + scopeUser = "read_user" + // used to retrieve groups from /oauth/userinfo + // https://docs.gitlab.com/ee/integration/openid_connect_provider.html + scopeOpenID = "openid" +) + +// Config holds configuration options for gitlab logins. +type Config struct { + BaseURL string `json:"baseURL"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Groups []string `json:"groups"` + UseLoginAsID bool `json:"useLoginAsID"` +} + +type gitlabUser struct { + ID int + Name string + Username string + State string + Email string + IsAdmin bool +} + +// Open returns a strategy for logging in through GitLab. +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + if c.BaseURL == "" { + c.BaseURL = "https://gitlab.com" + } + return &gitlabConnector{ + baseURL: c.BaseURL, + redirectURI: c.RedirectURI, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + logger: logger, + groups: c.Groups, + useLoginAsID: c.UseLoginAsID, + }, nil +} + +type connectorData struct { + // Support GitLab's Access Tokens and Refresh tokens. 
+ AccessToken string `json:"accessToken"` + RefreshToken string `json:"refreshToken"` +} + +var ( + _ connector.CallbackConnector = (*gitlabConnector)(nil) + _ connector.RefreshConnector = (*gitlabConnector)(nil) +) + +type gitlabConnector struct { + baseURL string + redirectURI string + groups []string + clientID string + clientSecret string + logger log.Logger + httpClient *http.Client + // if set to true will use the user's handle rather than their numeric id as the ID + useLoginAsID bool +} + +func (c *gitlabConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { + gitlabScopes := []string{scopeUser} + if c.groupsRequired(scopes.Groups) { + gitlabScopes = []string{scopeUser, scopeOpenID} + } + + gitlabEndpoint := oauth2.Endpoint{AuthURL: c.baseURL + "/oauth/authorize", TokenURL: c.baseURL + "/oauth/token"} + return &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: gitlabEndpoint, + Scopes: gitlabScopes, + RedirectURL: c.redirectURI, + } +} + +func (c *gitlabConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", c.redirectURI, callbackURL) + } + return c.oauth2Config(scopes).AuthCodeURL(state), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +func (c *gitlabConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + oauth2Config := c.oauth2Config(s) + + ctx := r.Context() + if c.httpClient != nil { + ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + } + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("gitlab: failed to get token: %v", err) + } + + return c.identity(ctx, s, token) +} + +func (c *gitlabConnector) identity(ctx context.Context, s connector.Scopes, token *oauth2.Token) (identity connector.Identity, err error) { + oauth2Config := c.oauth2Config(s) + client := oauth2Config.Client(ctx, token) + + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("gitlab: get user: %v", err) + } + + username := user.Name + if username == "" { + username = user.Email + } + + identity = connector.Identity{ + UserID: strconv.Itoa(user.ID), + Username: username, + PreferredUsername: user.Username, + Email: user.Email, + EmailVerified: true, + } + if c.useLoginAsID { + identity.UserID = user.Username + } + + if c.groupsRequired(s.Groups) { + groups, err := c.getGroups(ctx, client, s.Groups, user.Username) + if err != nil { + return identity, fmt.Errorf("gitlab: get groups: %v", err) + } + identity.Groups = groups + } + + if s.OfflineAccess { + data := connectorData{RefreshToken: token.RefreshToken, AccessToken: token.AccessToken} + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("gitlab: marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +func (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + var data connectorData + if err := 
json.Unmarshal(ident.ConnectorData, &data); err != nil { + return ident, fmt.Errorf("gitlab: unmarshal connector data: %v", err) + } + oauth2Config := c.oauth2Config(s) + + if c.httpClient != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, c.httpClient) + } + + switch { + case data.RefreshToken != "": + { + t := &oauth2.Token{ + RefreshToken: data.RefreshToken, + Expiry: time.Now().Add(-time.Hour), + } + token, err := oauth2Config.TokenSource(ctx, t).Token() + if err != nil { + return ident, fmt.Errorf("gitlab: failed to get refresh token: %v", err) + } + return c.identity(ctx, s, token) + } + case data.AccessToken != "": + { + token := &oauth2.Token{ + AccessToken: data.AccessToken, + } + return c.identity(ctx, s, token) + } + default: + return ident, errors.New("no refresh or access token found") + } +} + +func (c *gitlabConnector) groupsRequired(groupScope bool) bool { + return len(c.groups) > 0 || groupScope +} + +// user queries the GitLab API for profile information using the provided client. The HTTP +// client is expected to be constructed by the golang.org/x/oauth2 package, which inserts +// a bearer token as part of the request. +func (c *gitlabConnector) user(ctx context.Context, client *http.Client) (gitlabUser, error) { + var u gitlabUser + req, err := http.NewRequest("GET", c.baseURL+"/api/v4/user", nil) + if err != nil { + return u, fmt.Errorf("gitlab: new req: %v", err) + } + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return u, fmt.Errorf("gitlab: get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return u, fmt.Errorf("gitlab: read body: %v", err) + } + return u, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { + return u, fmt.Errorf("failed to decode response: %v", err) + } + return u, nil +} + +type userInfo struct { + Groups []string +} + +// userGroups queries the GitLab API for group membership. +// +// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, +// which inserts a bearer token as part of the request. 
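+//
+// For reference, the relevant part of the /oauth/userinfo response is the
+// OIDC groups claim (abridged, illustrative values):
+//
+//	{"groups": ["team-a", "team-a/subgroup-b"]}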
+func (c *gitlabConnector) userGroups(ctx context.Context, client *http.Client) ([]string, error) { + req, err := http.NewRequest("GET", c.baseURL+"/oauth/userinfo", nil) + if err != nil { + return nil, fmt.Errorf("gitlab: new req: %v", err) + } + req = req.WithContext(ctx) + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("gitlab: get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("gitlab: read body: %v", err) + } + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + var u userInfo + if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { + return nil, fmt.Errorf("failed to decode response: %v", err) + } + + return u.Groups, nil +} + +func (c *gitlabConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { + gitlabGroups, err := c.userGroups(ctx, client) + if err != nil { + return nil, err + } + + if len(c.groups) > 0 { + filteredGroups := groups.Filter(gitlabGroups, c.groups) + if len(filteredGroups) == 0 { + return nil, fmt.Errorf("gitlab: user %q is not in any of the required groups", userLogin) + } + return filteredGroups, nil + } else if groupScope { + return gitlabGroups, nil + } + + return nil, nil +} diff --git a/vendor/github.com/dexidp/dex/connector/google/BUILD b/vendor/github.com/dexidp/dex/connector/google/BUILD new file mode 100644 index 00000000..248cd70d --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/google/BUILD @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "google", + srcs = ["google.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/google", + importpath = "github.com/dexidp/dex/connector/google", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/go-oidc/v3/oidc", + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/golang.org/x/exp/slices", + "@org_golang_google_api//admin/directory/v1:directory", + "@org_golang_google_api//option", + "@org_golang_x_oauth2//:oauth2", + "@org_golang_x_oauth2//google", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/google/google.go b/vendor/github.com/dexidp/dex/connector/google/google.go new file mode 100644 index 00000000..d5908672 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/google/google.go @@ -0,0 +1,385 @@ +// Package google implements logging in through Google's OpenID Connect provider. +package google + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "golang.org/x/exp/slices" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + admin "google.golang.org/api/admin/directory/v1" + "google.golang.org/api/option" + + "github.com/dexidp/dex/connector" + pkg_groups "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/log" +) + +const ( + issuerURL = "https://accounts.google.com" + wildcardDomainToAdminEmail = "*" +) + +// Config holds configuration options for Google logins. 
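+//
+// A typical dex config stanza for this connector (illustrative values;
+// consult the dex documentation for the authoritative schema):
+//
+//	connectors:
+//	- type: google
+//	  id: google
+//	  name: Google
+//	  config:
+//	    clientID: $GOOGLE_CLIENT_ID
+//	    clientSecret: $GOOGLE_CLIENT_SECRET
+//	    redirectURI: https://dex.example.com/callback
+//	    serviceAccountFilePath: /etc/dex/googleAuth.json
+//	    domainToAdminEmail:
+//	      example.com: admin@example.com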
+type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + + Scopes []string `json:"scopes"` // defaults to "profile" and "email" + + // Optional list of whitelisted domains + // If this field is nonempty, only users from a listed domain will be allowed to log in + HostedDomains []string `json:"hostedDomains"` + + // Optional list of whitelisted groups + // If this field is nonempty, only users from a listed group will be allowed to log in + Groups []string `json:"groups"` + + // Optional path to service account json + // If nonempty, and groups claim is made, will use authentication from file to + // check groups with the admin directory api + ServiceAccountFilePath string `json:"serviceAccountFilePath"` + + // Deprecated: Use DomainToAdminEmail + AdminEmail string + + // Required if ServiceAccountFilePath + // The map workspace domain to email of a GSuite super user which the service account will impersonate + // when listing groups + DomainToAdminEmail map[string]string + + // If this field is true, fetch direct group membership and transitive group membership + FetchTransitiveGroupMembership bool `json:"fetchTransitiveGroupMembership"` +} + +// Open returns a connector which can be used to login users through Google. +func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { + if c.AdminEmail != "" { + log.Deprecated(logger, `google: use "domainToAdminEmail.*: %s" option instead of "adminEmail: %s".`, c.AdminEmail, c.AdminEmail) + if c.DomainToAdminEmail == nil { + c.DomainToAdminEmail = make(map[string]string) + } + + c.DomainToAdminEmail[wildcardDomainToAdminEmail] = c.AdminEmail + } + ctx, cancel := context.WithCancel(context.Background()) + + provider, err := oidc.NewProvider(ctx, issuerURL) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to get provider: %v", err) + } + + scopes := []string{oidc.ScopeOpenID} + if len(c.Scopes) > 0 { + scopes = append(scopes, c.Scopes...) + } else { + scopes = append(scopes, "profile", "email") + } + + adminSrv := make(map[string]*admin.Service) + + // We know impersonation is required when using a service account credential + // TODO: or is it? 
+ if len(c.DomainToAdminEmail) == 0 && c.ServiceAccountFilePath != "" { + cancel() + return nil, fmt.Errorf("directory service requires the domainToAdminEmail option to be configured") + } + + // Fixing a regression caused by default config fallback: https://github.com/dexidp/dex/issues/2699 + if (c.ServiceAccountFilePath != "" && len(c.DomainToAdminEmail) > 0) || slices.Contains(scopes, "groups") { + for domain, adminEmail := range c.DomainToAdminEmail { + srv, err := createDirectoryService(c.ServiceAccountFilePath, adminEmail, logger) + if err != nil { + cancel() + return nil, fmt.Errorf("could not create directory service: %v", err) + } + + adminSrv[domain] = srv + } + } + + clientID := c.ClientID + return &googleConnector{ + redirectURI: c.RedirectURI, + oauth2Config: &oauth2.Config{ + ClientID: clientID, + ClientSecret: c.ClientSecret, + Endpoint: provider.Endpoint(), + Scopes: scopes, + RedirectURL: c.RedirectURI, + }, + verifier: provider.Verifier( + &oidc.Config{ClientID: clientID}, + ), + logger: logger, + cancel: cancel, + hostedDomains: c.HostedDomains, + groups: c.Groups, + serviceAccountFilePath: c.ServiceAccountFilePath, + domainToAdminEmail: c.DomainToAdminEmail, + fetchTransitiveGroupMembership: c.FetchTransitiveGroupMembership, + adminSrv: adminSrv, + }, nil +} + +var ( + _ connector.CallbackConnector = (*googleConnector)(nil) + _ connector.RefreshConnector = (*googleConnector)(nil) +) + +type googleConnector struct { + redirectURI string + oauth2Config *oauth2.Config + verifier *oidc.IDTokenVerifier + cancel context.CancelFunc + logger log.Logger + hostedDomains []string + groups []string + serviceAccountFilePath string + domainToAdminEmail map[string]string + fetchTransitiveGroupMembership bool + adminSrv map[string]*admin.Service +} + +func (c *googleConnector) Close() error { + c.cancel() + return nil +} + +func (c *googleConnector) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + } + + var opts []oauth2.AuthCodeOption + if len(c.hostedDomains) > 0 { + preferredDomain := c.hostedDomains[0] + if len(c.hostedDomains) > 1 { + preferredDomain = "*" + } + opts = append(opts, oauth2.SetAuthURLParam("hd", preferredDomain)) + } + + if s.OfflineAccess { + opts = append(opts, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent")) + } + return c.oauth2Config.AuthCodeURL(state, opts...), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +func (c *googleConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + token, err := c.oauth2Config.Exchange(r.Context(), q.Get("code")) + if err != nil { + return identity, fmt.Errorf("google: failed to get token: %v", err) + } + + return c.createIdentity(r.Context(), identity, s, token) +} + +func (c *googleConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + t := &oauth2.Token{ + RefreshToken: string(identity.ConnectorData), + Expiry: time.Now().Add(-time.Hour), + } + token, err := c.oauth2Config.TokenSource(ctx, 
t).Token() + if err != nil { + return identity, fmt.Errorf("google: failed to get token: %v", err) + } + + return c.createIdentity(ctx, identity, s, token) +} + +func (c *googleConnector) createIdentity(ctx context.Context, identity connector.Identity, s connector.Scopes, token *oauth2.Token) (connector.Identity, error) { + rawIDToken, ok := token.Extra("id_token").(string) + if !ok { + return identity, errors.New("google: no id_token in token response") + } + idToken, err := c.verifier.Verify(ctx, rawIDToken) + if err != nil { + return identity, fmt.Errorf("google: failed to verify ID Token: %v", err) + } + + var claims struct { + Username string `json:"name"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + HostedDomain string `json:"hd"` + } + if err := idToken.Claims(&claims); err != nil { + return identity, fmt.Errorf("oidc: failed to decode claims: %v", err) + } + + if len(c.hostedDomains) > 0 { + found := false + for _, domain := range c.hostedDomains { + if claims.HostedDomain == domain { + found = true + break + } + } + + if !found { + return identity, fmt.Errorf("oidc: unexpected hd claim %v", claims.HostedDomain) + } + } + + var groups []string + if s.Groups && len(c.adminSrv) > 0 { + checkedGroups := make(map[string]struct{}) + groups, err = c.getGroups(claims.Email, c.fetchTransitiveGroupMembership, checkedGroups) + if err != nil { + return identity, fmt.Errorf("google: could not retrieve groups: %v", err) + } + + if len(c.groups) > 0 { + groups = pkg_groups.Filter(groups, c.groups) + if len(groups) == 0 { + return identity, fmt.Errorf("google: user %q is not in any of the required groups", claims.Username) + } + } + } + + identity = connector.Identity{ + UserID: idToken.Subject, + Username: claims.Username, + Email: claims.Email, + EmailVerified: claims.EmailVerified, + ConnectorData: []byte(token.RefreshToken), + Groups: groups, + } + return identity, nil +} + +// getGroups creates a connection to the admin directory service and lists +// all groups the user is a member of +func (c *googleConnector) getGroups(email string, fetchTransitiveGroupMembership bool, checkedGroups map[string]struct{}) ([]string, error) { + var userGroups []string + var err error + groupsList := &admin.Groups{} + domain := c.extractDomainFromEmail(email) + adminSrv, err := c.findAdminService(domain) + if err != nil { + return nil, err + } + + for { + groupsList, err = adminSrv.Groups.List(). + UserKey(email).PageToken(groupsList.NextPageToken).Do() + if err != nil { + return nil, fmt.Errorf("could not list groups: %v", err) + } + + for _, group := range groupsList.Groups { + if _, exists := checkedGroups[group.Email]; exists { + continue + } + + checkedGroups[group.Email] = struct{}{} + // TODO (joelspeed): Make desired group key configurable + userGroups = append(userGroups, group.Email) + + if !fetchTransitiveGroupMembership { + continue + } + + // getGroups takes a user's email/alias as well as a group's email/alias + transitiveGroups, err := c.getGroups(group.Email, fetchTransitiveGroupMembership, checkedGroups) + if err != nil { + return nil, fmt.Errorf("could not list transitive groups: %v", err) + } + + userGroups = append(userGroups, transitiveGroups...) 
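+			// checkedGroups guarantees that the recursion terminates: a group
+			// that has already been expanded is skipped on any later visit.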
+		}
+
+		if groupsList.NextPageToken == "" {
+			break
+		}
+	}
+
+	return userGroups, nil
+}
+
+func (c *googleConnector) findAdminService(domain string) (*admin.Service, error) {
+	adminSrv, ok := c.adminSrv[domain]
+	if !ok {
+		adminSrv, ok = c.adminSrv[wildcardDomainToAdminEmail]
+		c.logger.Debugf("using wildcard (%s) admin email to fetch groups", c.domainToAdminEmail[wildcardDomainToAdminEmail])
+	}
+
+	if !ok {
+		return nil, fmt.Errorf("unable to find super admin email: domainToAdminEmail is not set for domain %q and no wildcard (%q) entry exists", domain, wildcardDomainToAdminEmail)
+	}
+
+	return adminSrv, nil
+}
+
+// extractDomainFromEmail extracts the domain name from an email address: for a
+// well-formed address it returns everything after the last "@" symbol. For a
+// broken or invalid address it returns the wildcard symbol.
+func (c *googleConnector) extractDomainFromEmail(email string) string {
+	at := strings.LastIndex(email, "@")
+	if at >= 0 {
+		return email[at+1:]
+	}
+
+	return wildcardDomainToAdminEmail
+}
+
+// createDirectoryService sets up super user impersonation and creates an admin client for calling
+// the google admin api. If no serviceAccountFilePath is defined, the application default credential
+// is used.
+func createDirectoryService(serviceAccountFilePath, email string, logger log.Logger) (*admin.Service, error) {
+	var jsonCredentials []byte
+	var err error
+
+	ctx := context.Background()
+	if serviceAccountFilePath == "" {
+		logger.Warn("the application default credential is used since the service account file path is not specified")
+		credential, err := google.FindDefaultCredentials(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch application default credentials: %w", err)
+		}
+		jsonCredentials = credential.JSON
+	} else {
+		jsonCredentials, err = os.ReadFile(serviceAccountFilePath)
+		if err != nil {
+			return nil, fmt.Errorf("error reading credentials from file: %v", err)
+		}
+	}
+	config, err := google.JWTConfigFromJSON(jsonCredentials, admin.AdminDirectoryGroupReadonlyScope)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse client secret file to config: %v", err)
+	}
+
+	// Only attempt impersonation when there is a user configured
+	if email != "" {
+		config.Subject = email
+	}
+
+	return admin.NewService(ctx, option.WithHTTPClient(config.Client(ctx)))
+}
diff --git a/vendor/github.com/dexidp/dex/connector/keystone/BUILD b/vendor/github.com/dexidp/dex/connector/keystone/BUILD
new file mode 100644
index 00000000..e59d3fbb
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/keystone/BUILD
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "keystone",
+    srcs = ["keystone.go"],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/keystone",
+    importpath = "github.com/dexidp/dex/connector/keystone",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/dexidp/dex/connector",
+        "//vendor/github.com/dexidp/dex/pkg/log",
+    ],
+)
diff --git a/vendor/github.com/dexidp/dex/connector/keystone/keystone.go b/vendor/github.com/dexidp/dex/connector/keystone/keystone.go
new file mode 100644
index 00000000..03f47331
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/keystone/keystone.go
@@ -0,0 +1,312 @@
+// Package keystone provides an authentication strategy using Keystone.
+package keystone + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/log" +) + +type conn struct { + Domain string + Host string + AdminUsername string + AdminPassword string + client *http.Client + Logger log.Logger +} + +type userKeystone struct { + Domain domainKeystone `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +type domainKeystone struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// Config holds the configuration parameters for Keystone connector. +// Keystone should expose API v3 +// An example config: +// +// connectors: +// type: keystone +// id: keystone +// name: Keystone +// config: +// keystoneHost: http://example:5000 +// domain: default +// keystoneUsername: demo +// keystonePassword: DEMO_PASS +type Config struct { + Domain string `json:"domain"` + Host string `json:"keystoneHost"` + AdminUsername string `json:"keystoneUsername"` + AdminPassword string `json:"keystonePassword"` +} + +type loginRequestData struct { + auth `json:"auth"` +} + +type auth struct { + Identity identity `json:"identity"` +} + +type identity struct { + Methods []string `json:"methods"` + Password password `json:"password"` +} + +type password struct { + User user `json:"user"` +} + +type user struct { + Name string `json:"name"` + Domain domain `json:"domain"` + Password string `json:"password"` +} + +type domain struct { + ID string `json:"id"` +} + +type token struct { + User userKeystone `json:"user"` +} + +type tokenResponse struct { + Token token `json:"token"` +} + +type group struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type groupsResponse struct { + Groups []group `json:"groups"` +} + +type userResponse struct { + User struct { + Name string `json:"name"` + Email string `json:"email"` + ID string `json:"id"` + } `json:"user"` +} + +var ( + _ connector.PasswordConnector = &conn{} + _ connector.RefreshConnector = &conn{} +) + +// Open returns an authentication strategy using Keystone. 
+func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + return &conn{ + Domain: c.Domain, + Host: c.Host, + AdminUsername: c.AdminUsername, + AdminPassword: c.AdminPassword, + Logger: logger, + client: http.DefaultClient, + }, nil +} + +func (p *conn) Close() error { return nil } + +func (p *conn) Login(ctx context.Context, scopes connector.Scopes, username, password string) (identity connector.Identity, validPassword bool, err error) { + resp, err := p.getTokenResponse(ctx, username, password) + if err != nil { + return identity, false, fmt.Errorf("keystone: error %v", err) + } + if resp.StatusCode/100 != 2 { + return identity, false, fmt.Errorf("keystone login: error %v", resp.StatusCode) + } + if resp.StatusCode != 201 { + return identity, false, nil + } + token := resp.Header.Get("X-Subject-Token") + data, err := io.ReadAll(resp.Body) + if err != nil { + return identity, false, err + } + defer resp.Body.Close() + tokenResp := new(tokenResponse) + err = json.Unmarshal(data, &tokenResp) + if err != nil { + return identity, false, fmt.Errorf("keystone: invalid token response: %v", err) + } + if scopes.Groups { + groups, err := p.getUserGroups(ctx, tokenResp.Token.User.ID, token) + if err != nil { + return identity, false, err + } + identity.Groups = groups + } + identity.Username = username + identity.UserID = tokenResp.Token.User.ID + + user, err := p.getUser(ctx, tokenResp.Token.User.ID, token) + if err != nil { + return identity, false, err + } + if user.User.Email != "" { + identity.Email = user.User.Email + identity.EmailVerified = true + } + + return identity, true, nil +} + +func (p *conn) Prompt() string { return "username" } + +func (p *conn) Refresh( + ctx context.Context, scopes connector.Scopes, identity connector.Identity, +) (connector.Identity, error) { + token, err := p.getAdminToken(ctx) + if err != nil { + return identity, fmt.Errorf("keystone: failed to obtain admin token: %v", err) + } + ok, err := p.checkIfUserExists(ctx, identity.UserID, token) + if err != nil { + return identity, err + } + if !ok { + return identity, fmt.Errorf("keystone: user %q does not exist", identity.UserID) + } + if scopes.Groups { + groups, err := p.getUserGroups(ctx, identity.UserID, token) + if err != nil { + return identity, err + } + identity.Groups = groups + } + return identity, nil +} + +func (p *conn) getTokenResponse(ctx context.Context, username, pass string) (response *http.Response, err error) { + jsonData := loginRequestData{ + auth: auth{ + Identity: identity{ + Methods: []string{"password"}, + Password: password{ + User: user{ + Name: username, + Domain: domain{ID: p.Domain}, + Password: pass, + }, + }, + }, + }, + } + jsonValue, err := json.Marshal(jsonData) + if err != nil { + return nil, err + } + // https://developer.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization + authTokenURL := p.Host + "/v3/auth/tokens/" + req, err := http.NewRequest("POST", authTokenURL, bytes.NewBuffer(jsonValue)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req = req.WithContext(ctx) + + return p.client.Do(req) +} + +func (p *conn) getAdminToken(ctx context.Context) (string, error) { + resp, err := p.getTokenResponse(ctx, p.AdminUsername, p.AdminPassword) + if err != nil { + return "", err + } + defer resp.Body.Close() + + token := resp.Header.Get("X-Subject-Token") + return token, nil +} + +func (p *conn) checkIfUserExists(ctx context.Context, userID string, token string) (bool, 
error) {
+	user, err := p.getUser(ctx, userID, token)
+	return user != nil, err
+}
+
+func (p *conn) getUser(ctx context.Context, userID string, token string) (*userResponse, error) {
+	// https://developer.openstack.org/api-ref/identity/v3/#show-user-details
+	userURL := p.Host + "/v3/users/" + userID
+	req, err := http.NewRequest("GET", userURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("X-Auth-Token", token)
+	req = req.WithContext(ctx)
+	resp, err := p.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		// A non-200 response (e.g. 404 for a deleted user) is treated as
+		// "user not found" rather than an error; callers must check for a
+		// nil result.
+		return nil, nil
+	}
+
+	data, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	user := userResponse{}
+	err = json.Unmarshal(data, &user)
+	if err != nil {
+		return nil, err
+	}
+
+	return &user, nil
+}
+
+func (p *conn) getUserGroups(ctx context.Context, userID string, token string) ([]string, error) {
+	// https://developer.openstack.org/api-ref/identity/v3/#list-groups-to-which-a-user-belongs
+	groupsURL := p.Host + "/v3/users/" + userID + "/groups"
+	req, err := http.NewRequest("GET", groupsURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("X-Auth-Token", token)
+	req = req.WithContext(ctx)
+	resp, err := p.client.Do(req)
+	if err != nil {
+		p.Logger.Errorf("keystone: error while fetching user %q groups\n", userID)
+		return nil, err
+	}
+
+	data, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	groupsResp := new(groupsResponse)
+
+	err = json.Unmarshal(data, &groupsResp)
+	if err != nil {
+		return nil, err
+	}
+
+	groups := make([]string, len(groupsResp.Groups))
+	for i, group := range groupsResp.Groups {
+		groups[i] = group.Name
+	}
+	return groups, nil
+}
diff --git a/vendor/github.com/dexidp/dex/connector/ldap/BUILD b/vendor/github.com/dexidp/dex/connector/ldap/BUILD
new file mode 100644
index 00000000..7bf127cb
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/ldap/BUILD
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "ldap",
+    srcs = ["ldap.go"],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/ldap",
+    importpath = "github.com/dexidp/dex/connector/ldap",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/dexidp/dex/connector",
+        "//vendor/github.com/dexidp/dex/pkg/log",
+        "//vendor/github.com/go-ldap/ldap/v3:ldap",
+    ],
+)
diff --git a/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh b/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh
new file mode 100644
index 00000000..8b0ea49b
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh
@@ -0,0 +1,49 @@
+#!/bin/bash -e
+
+# Stolen from the coreos/matchbox repo.
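+# Generates a throwaway CA and a "localhost" server certificate under
+# testdata/ for the LDAP connector's TLS tests.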
+
+echo "
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+
+[req_distinguished_name]
+
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.101 = localhost
+" > openssl.config
+
+openssl genrsa -out testdata/ca.key 2048
+openssl genrsa -out testdata/server.key 2048
+
+openssl req \
+    -x509 -new -nodes \
+    -key testdata/ca.key \
+    -days 10000 -out testdata/ca.crt \
+    -subj "/CN=ldap-tests"
+
+openssl req \
+    -new \
+    -key testdata/server.key \
+    -out testdata/server.csr \
+    -subj "/CN=localhost" \
+    -config openssl.config
+
+openssl x509 -req \
+    -in testdata/server.csr \
+    -CA testdata/ca.crt \
+    -CAkey testdata/ca.key \
+    -CAcreateserial \
+    -out testdata/server.crt \
+    -days 10000 \
+    -extensions v3_req \
+    -extfile openssl.config
+
+rm testdata/server.csr
+rm testdata/ca.srl
+rm openssl.config
diff --git a/vendor/github.com/dexidp/dex/connector/ldap/ldap.go b/vendor/github.com/dexidp/dex/connector/ldap/ldap.go
new file mode 100644
index 00000000..c50d8309
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/connector/ldap/ldap.go
@@ -0,0 +1,640 @@
+// Package ldap implements strategies for authenticating using the LDAP protocol.
+package ldap
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"net"
+	"os"
+	"strings"
+
+	"github.com/go-ldap/ldap/v3"
+
+	"github.com/dexidp/dex/connector"
+	"github.com/dexidp/dex/pkg/log"
+)
+
+// Config holds the configuration parameters for the LDAP connector. The LDAP
+// connector requires executing two queries: the first finds the user based on
+// the username and password given to the connector, and the second uses the
+// user entry to search for groups.
+//
+// An example config:
+//
+//	type: ldap
+//	config:
+//	  host: ldap.example.com:636
+//	  # The following field is required if using port 389.
+//	  # insecureNoSSL: true
+//	  rootCA: /etc/dex/ldap.ca
+//	  bindDN: uid=serviceaccount,cn=users,dc=example,dc=com
+//	  bindPW: password
+//	  userSearch:
+//	    # Would translate to the query "(&(objectClass=person)(uid=<username>))"
+//	    baseDN: cn=users,dc=example,dc=com
+//	    filter: "(objectClass=person)"
+//	    username: uid
+//	    idAttr: uid
+//	    emailAttr: mail
+//	    nameAttr: name
+//	    preferredUsernameAttr: uid
+//	  groupSearch:
+//	    # Would translate to a separate query per user matcher pair and aggregate results into a single group list:
+//	    # "(&(|(objectClass=posixGroup)(objectClass=groupOfNames))(memberUid=<user uid>))"
+//	    # "(&(|(objectClass=posixGroup)(objectClass=groupOfNames))(member=<user DN>))"
+//	    baseDN: cn=groups,dc=example,dc=com
+//	    filter: "(|(objectClass=posixGroup)(objectClass=groupOfNames))"
+//	    userMatchers:
+//	    - userAttr: uid
+//	      groupAttr: memberUid
+//	    # Use if full DN is needed and not available as any other attribute
+//	    # Will only work if "DN" attribute does not exist in the record:
+//	    - userAttr: DN
+//	      groupAttr: member
+//	    nameAttr: name
+//
+
+// UserMatcher holds information about user and group matching.
+type UserMatcher struct {
+	UserAttr  string `json:"userAttr"`
+	GroupAttr string `json:"groupAttr"`
+}
+
+// Config holds configuration options for LDAP logins.
+type Config struct {
+	// The host and optional port of the LDAP server. If port isn't supplied, it will be
+	// guessed based on the TLS configuration. 389 or 636.
+	Host string `json:"host"`
+
+	// Required if LDAP host does not use TLS.
+	InsecureNoSSL bool `json:"insecureNoSSL"`
+
+	// Don't verify the CA.
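+	// Skipping verification is intended for testing only: it leaves
+	// connections open to man-in-the-middle attacks.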
+	InsecureSkipVerify bool `json:"insecureSkipVerify"`
+
+	// Connect to the insecure port then issue a StartTLS command to negotiate a
+	// secure connection. If unsupplied, secure connections will use the LDAPS
+	// protocol.
+	StartTLS bool `json:"startTLS"`
+
+	// Path to a trusted root certificate file.
+	RootCA string `json:"rootCA"`
+	// Path to a client cert file generated by rootCA.
+	ClientCert string `json:"clientCert"`
+	// Path to a client private key file generated by rootCA.
+	ClientKey string `json:"clientKey"`
+	// Base64 encoded PEM data containing root CAs.
+	RootCAData []byte `json:"rootCAData"`
+
+	// BindDN and BindPW for an application service account. The connector uses these
+	// credentials to search for users and groups.
+	BindDN string `json:"bindDN"`
+	BindPW string `json:"bindPW"`
+
+	// UsernamePrompt allows users to override the username attribute (displayed
+	// in the username/password prompt). If unset, the handler will use
+	// "Username".
+	UsernamePrompt string `json:"usernamePrompt"`
+
+	// User entry search configuration.
+	UserSearch struct {
+		// BaseDN to start the search from. For example "cn=users,dc=example,dc=com"
+		BaseDN string `json:"baseDN"`
+
+		// Optional filter to apply when searching the directory. For example "(objectClass=person)"
+		Filter string `json:"filter"`
+
+		// Attribute to match against the inputted username. This will be translated and combined
+		// with the other filter as "(<attr>=<username>)".
+		Username string `json:"username"`
+
+		// Can either be:
+		// * "sub" - search the whole sub tree
+		// * "one" - only search one level
+		Scope string `json:"scope"`
+
+		// A mapping of attributes on the user entry to claims.
+		IDAttr                    string `json:"idAttr"`                // Defaults to "uid"
+		EmailAttr                 string `json:"emailAttr"`             // Defaults to "mail"
+		NameAttr                  string `json:"nameAttr"`              // No default.
+		PreferredUsernameAttrAttr string `json:"preferredUsernameAttr"` // No default.
+
+		// If this is set, the email claim of the id token will be constructed from the idAttr and
+		// value of emailSuffix. This should not include the @ character.
+		EmailSuffix string `json:"emailSuffix"` // No default.
+	} `json:"userSearch"`
+
+	// Group search configuration.
+	GroupSearch struct {
+		// BaseDN to start the search from. For example "cn=groups,dc=example,dc=com"
+		BaseDN string `json:"baseDN"`
+
+		// Optional filter to apply when searching the directory. For example "(objectClass=posixGroup)"
+		Filter string `json:"filter"`
+
+		Scope string `json:"scope"` // Defaults to "sub"
+
+		// DEPRECATED config options. These are left for backward compatibility.
+		// See "UserMatchers" below for the current group-to-user matching implementation.
+		// TODO: should be eventually removed from the code
+		UserAttr  string `json:"userAttr"`
+		GroupAttr string `json:"groupAttr"`
+
+		// Array of the field pairs used to match a user to a group.
+		// See the "UserMatcher" struct for the exact field names.
+		//
+		// Each pair adds an additional requirement to the filter that an attribute in the group
+		// match the user's attribute value. For example, that the "members" attribute of
+		// a group matches the "uid" of the user. The exact filter being added is:
+		//
+		//	(userMatchers[n].<groupAttr>=userMatchers[n].<userAttr value>)
+		//
+		UserMatchers []UserMatcher `json:"userMatchers"`
+
+		// The attribute of the group that represents its name.
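+		// Typically "name" or "cn".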
+ NameAttr string `json:"nameAttr"` + } `json:"groupSearch"` +} + +func scopeString(i int) string { + switch i { + case ldap.ScopeBaseObject: + return "base" + case ldap.ScopeSingleLevel: + return "one" + case ldap.ScopeWholeSubtree: + return "sub" + default: + return "" + } +} + +func parseScope(s string) (int, bool) { + // NOTE(ericchiang): ScopeBaseObject doesn't really make sense for us because we + // never know the user's or group's DN. + switch s { + case "", "sub": + return ldap.ScopeWholeSubtree, true + case "one": + return ldap.ScopeSingleLevel, true + } + return 0, false +} + +// Build a list of group attr name to user attr value matchers. +// Function exists here to allow backward compatibility between old and new +// group to user matching implementations. +// See "Config.GroupSearch.UserMatchers" comments for the details +func userMatchers(c *Config, logger log.Logger) []UserMatcher { + if len(c.GroupSearch.UserMatchers) > 0 && c.GroupSearch.UserMatchers[0].UserAttr != "" { + return c.GroupSearch.UserMatchers + } + + log.Deprecated(logger, `LDAP: use groupSearch.userMatchers option instead of "userAttr/groupAttr" fields.`) + return []UserMatcher{ + { + UserAttr: c.GroupSearch.UserAttr, + GroupAttr: c.GroupSearch.GroupAttr, + }, + } +} + +// Open returns an authentication strategy using LDAP. +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + conn, err := c.OpenConnector(logger) + if err != nil { + return nil, err + } + return connector.Connector(conn), nil +} + +type refreshData struct { + Username string `json:"username"` + Entry ldap.Entry `json:"entry"` +} + +// OpenConnector is the same as Open but returns a type with all implemented connector interfaces. +func (c *Config) OpenConnector(logger log.Logger) (interface { + connector.Connector + connector.PasswordConnector + connector.RefreshConnector +}, error, +) { + return c.openConnector(logger) +} + +func (c *Config) openConnector(logger log.Logger) (*ldapConnector, error) { + requiredFields := []struct { + name string + val string + }{ + {"host", c.Host}, + {"userSearch.baseDN", c.UserSearch.BaseDN}, + {"userSearch.username", c.UserSearch.Username}, + } + + for _, field := range requiredFields { + if field.val == "" { + return nil, fmt.Errorf("ldap: missing required field %q", field.name) + } + } + + var ( + host string + err error + ) + if host, _, err = net.SplitHostPort(c.Host); err != nil { + host = c.Host + if c.InsecureNoSSL { + c.Host += ":389" + } else { + c.Host += ":636" + } + } + + tlsConfig := &tls.Config{ServerName: host, InsecureSkipVerify: c.InsecureSkipVerify} + if c.RootCA != "" || len(c.RootCAData) != 0 { + data := c.RootCAData + if len(data) == 0 { + var err error + if data, err = os.ReadFile(c.RootCA); err != nil { + return nil, fmt.Errorf("ldap: read ca file: %v", err) + } + } + rootCAs := x509.NewCertPool() + if !rootCAs.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("ldap: no certs found in ca file") + } + tlsConfig.RootCAs = rootCAs + } + + if c.ClientKey != "" && c.ClientCert != "" { + cert, err := tls.LoadX509KeyPair(c.ClientCert, c.ClientKey) + if err != nil { + return nil, fmt.Errorf("ldap: load client cert failed: %v", err) + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + userSearchScope, ok := parseScope(c.UserSearch.Scope) + if !ok { + return nil, fmt.Errorf("userSearch.Scope unknown value %q", c.UserSearch.Scope) + } + groupSearchScope, ok := parseScope(c.GroupSearch.Scope) + if !ok { + return nil, 
fmt.Errorf("groupSearch.Scope unknown value %q", c.GroupSearch.Scope)
+	}
+
+	// TODO(nabokihms): remove it after deleting deprecated groupSearch options
+	c.GroupSearch.UserMatchers = userMatchers(c, logger)
+	return &ldapConnector{*c, userSearchScope, groupSearchScope, tlsConfig, logger}, nil
+}
+
+type ldapConnector struct {
+	Config
+
+	userSearchScope  int
+	groupSearchScope int
+
+	tlsConfig *tls.Config
+
+	logger log.Logger
+}
+
+var (
+	_ connector.PasswordConnector = (*ldapConnector)(nil)
+	_ connector.RefreshConnector  = (*ldapConnector)(nil)
+)
+
+// do initializes a connection to the LDAP directory and passes it to the
+// provided function. It then performs appropriate teardown or reuse before
+// returning.
+func (c *ldapConnector) do(_ context.Context, f func(c *ldap.Conn) error) error {
+	// TODO(ericchiang): support context here
+	var (
+		conn *ldap.Conn
+		err  error
+	)
+	switch {
+	case c.InsecureNoSSL:
+		conn, err = ldap.Dial("tcp", c.Host)
+	case c.StartTLS:
+		conn, err = ldap.Dial("tcp", c.Host)
+		if err != nil {
+			return fmt.Errorf("failed to connect: %v", err)
+		}
+		if err := conn.StartTLS(c.tlsConfig); err != nil {
+			return fmt.Errorf("start TLS failed: %v", err)
+		}
+	default:
+		conn, err = ldap.DialTLS("tcp", c.Host, c.tlsConfig)
+	}
+	if err != nil {
+		return fmt.Errorf("failed to connect: %v", err)
+	}
+	defer conn.Close()
+
+	// If bindDN and bindPW are empty this will default to an anonymous bind.
+	if c.BindDN == "" && c.BindPW == "" {
+		if err := conn.UnauthenticatedBind(""); err != nil {
+			return fmt.Errorf("ldap: initial anonymous bind failed: %v", err)
+		}
+	} else if err := conn.Bind(c.BindDN, c.BindPW); err != nil {
+		return fmt.Errorf("ldap: initial bind for user %q failed: %v", c.BindDN, err)
+	}
+
+	return f(conn)
+}
+
+func (c *ldapConnector) getAttrs(e ldap.Entry, name string) []string {
+	for _, a := range e.Attributes {
+		if a.Name != name {
+			continue
+		}
+		return a.Values
+	}
+	if strings.ToLower(name) == "dn" {
+		return []string{e.DN}
+	}
+
+	c.logger.Debugf("%q attribute is not found in the entry", name)
+	return nil
+}
+
+func (c *ldapConnector) getAttr(e ldap.Entry, name string) string {
+	if a := c.getAttrs(e, name); len(a) > 0 {
+		return a[0]
+	}
+	return ""
+}
+
+func (c *ldapConnector) identityFromEntry(user ldap.Entry) (ident connector.Identity, err error) {
+	// If we're missing any attributes, such as email or ID, we want to report
+	// an error rather than continuing.
+	missing := []string{}
+
+	// Fill the identity struct using the attributes from the user entry.
+	if ident.UserID = c.getAttr(user, c.UserSearch.IDAttr); ident.UserID == "" {
+		missing = append(missing, c.UserSearch.IDAttr)
+	}
+
+	if c.UserSearch.NameAttr != "" {
+		if ident.Username = c.getAttr(user, c.UserSearch.NameAttr); ident.Username == "" {
+			missing = append(missing, c.UserSearch.NameAttr)
+		}
+	}
+
+	if c.UserSearch.PreferredUsernameAttrAttr != "" {
+		if ident.PreferredUsername = c.getAttr(user, c.UserSearch.PreferredUsernameAttrAttr); ident.PreferredUsername == "" {
+			missing = append(missing, c.UserSearch.PreferredUsernameAttrAttr)
+		}
+	}
+
+	if c.UserSearch.EmailSuffix != "" {
+		ident.Email = ident.Username + "@" + c.UserSearch.EmailSuffix
+	} else if ident.Email = c.getAttr(user, c.UserSearch.EmailAttr); ident.Email == "" {
+		missing = append(missing, c.UserSearch.EmailAttr)
+	}
+	// TODO(ericchiang): Let this value be set from an attribute.
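+	// (No attribute is consulted today; every identity is reported with a
+	// verified email.)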
+ ident.EmailVerified = true + + if len(missing) != 0 { + err := fmt.Errorf("ldap: entry %q missing following required attribute(s): %q", user.DN, missing) + return connector.Identity{}, err + } + return ident, nil +} + +func (c *ldapConnector) userEntry(conn *ldap.Conn, username string) (user ldap.Entry, found bool, err error) { + filter := fmt.Sprintf("(%s=%s)", c.UserSearch.Username, ldap.EscapeFilter(username)) + if c.UserSearch.Filter != "" { + filter = fmt.Sprintf("(&%s%s)", c.UserSearch.Filter, filter) + } + + // Initial search. + req := &ldap.SearchRequest{ + BaseDN: c.UserSearch.BaseDN, + Filter: filter, + Scope: c.userSearchScope, + // We only need to search for these specific requests. + Attributes: []string{ + c.UserSearch.IDAttr, + c.UserSearch.EmailAttr, + // TODO(ericchiang): what if this contains duplicate values? + }, + } + + for _, matcher := range c.GroupSearch.UserMatchers { + req.Attributes = append(req.Attributes, matcher.UserAttr) + } + + if c.UserSearch.NameAttr != "" { + req.Attributes = append(req.Attributes, c.UserSearch.NameAttr) + } + + if c.UserSearch.PreferredUsernameAttrAttr != "" { + req.Attributes = append(req.Attributes, c.UserSearch.PreferredUsernameAttrAttr) + } + + c.logger.Infof("performing ldap search %s %s %s", + req.BaseDN, scopeString(req.Scope), req.Filter) + resp, err := conn.Search(req) + if err != nil { + return ldap.Entry{}, false, fmt.Errorf("ldap: search with filter %q failed: %v", req.Filter, err) + } + + switch n := len(resp.Entries); n { + case 0: + c.logger.Errorf("ldap: no results returned for filter: %q", filter) + return ldap.Entry{}, false, nil + case 1: + user = *resp.Entries[0] + c.logger.Infof("username %q mapped to entry %s", username, user.DN) + return user, true, nil + default: + return ldap.Entry{}, false, fmt.Errorf("ldap: filter returned multiple (%d) results: %q", n, filter) + } +} + +func (c *ldapConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (ident connector.Identity, validPass bool, err error) { + // make this check to avoid unauthenticated bind to the LDAP server. + if password == "" { + return connector.Identity{}, false, nil + } + + var ( + // We want to return a different error if the user's password is incorrect vs + // if there was an error. + incorrectPass = false + user ldap.Entry + ) + + err = c.do(ctx, func(conn *ldap.Conn) error { + entry, found, err := c.userEntry(conn, username) + if err != nil { + return err + } + if !found { + incorrectPass = true + return nil + } + user = entry + + // Try to authenticate as the distinguished name. + if err := conn.Bind(user.DN, password); err != nil { + // Detect a bad password through the LDAP error code. 
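+			// Result code 49 (invalid credentials) and 19 (constraint violation,
+			// e.g. an expired or locked account) are mapped to a wrong password
+			// rather than an operational error.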
+ if ldapErr, ok := err.(*ldap.Error); ok { + switch ldapErr.ResultCode { + case ldap.LDAPResultInvalidCredentials: + c.logger.Errorf("ldap: invalid password for user %q", user.DN) + incorrectPass = true + return nil + case ldap.LDAPResultConstraintViolation: + c.logger.Errorf("ldap: constraint violation for user %q: %s", user.DN, ldapErr.Error()) + incorrectPass = true + return nil + } + } // will also catch all ldap.Error without a case statement above + return fmt.Errorf("ldap: failed to bind as dn %q: %v", user.DN, err) + } + return nil + }) + if err != nil { + return connector.Identity{}, false, err + } + if incorrectPass { + return connector.Identity{}, false, nil + } + + if ident, err = c.identityFromEntry(user); err != nil { + return connector.Identity{}, false, err + } + + if s.Groups { + groups, err := c.groups(ctx, user) + if err != nil { + return connector.Identity{}, false, fmt.Errorf("ldap: failed to query groups: %v", err) + } + ident.Groups = groups + } + + if s.OfflineAccess { + refresh := refreshData{ + Username: username, + Entry: user, + } + // Encode entry for follow up requests such as the groups query and + // refresh attempts. + if ident.ConnectorData, err = json.Marshal(refresh); err != nil { + return connector.Identity{}, false, fmt.Errorf("ldap: marshal entry: %v", err) + } + } + + return ident, true, nil +} + +func (c *ldapConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + var data refreshData + if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { + return ident, fmt.Errorf("ldap: failed to unmarshal internal data: %v", err) + } + + var user ldap.Entry + err := c.do(ctx, func(conn *ldap.Conn) error { + entry, found, err := c.userEntry(conn, data.Username) + if err != nil { + return err + } + if !found { + return fmt.Errorf("ldap: user not found %q", data.Username) + } + user = entry + return nil + }) + if err != nil { + return ident, err + } + if user.DN != data.Entry.DN { + return ident, fmt.Errorf("ldap: refresh for username %q expected DN %q got %q", data.Username, data.Entry.DN, user.DN) + } + + newIdent, err := c.identityFromEntry(user) + if err != nil { + return ident, err + } + newIdent.ConnectorData = ident.ConnectorData + + if s.Groups { + groups, err := c.groups(ctx, user) + if err != nil { + return connector.Identity{}, fmt.Errorf("ldap: failed to query groups: %v", err) + } + newIdent.Groups = groups + } + return newIdent, nil +} + +func (c *ldapConnector) groups(ctx context.Context, user ldap.Entry) ([]string, error) { + if c.GroupSearch.BaseDN == "" { + c.logger.Debugf("No groups returned for %q because no groups baseDN has been configured.", c.getAttr(user, c.UserSearch.NameAttr)) + return nil, nil + } + + var groups []*ldap.Entry + for _, matcher := range c.GroupSearch.UserMatchers { + for _, attr := range c.getAttrs(user, matcher.UserAttr) { + filter := fmt.Sprintf("(%s=%s)", matcher.GroupAttr, ldap.EscapeFilter(attr)) + if c.GroupSearch.Filter != "" { + filter = fmt.Sprintf("(&%s%s)", c.GroupSearch.Filter, filter) + } + + req := &ldap.SearchRequest{ + BaseDN: c.GroupSearch.BaseDN, + Filter: filter, + Scope: c.groupSearchScope, + Attributes: []string{c.GroupSearch.NameAttr}, + } + + gotGroups := false + if err := c.do(ctx, func(conn *ldap.Conn) error { + c.logger.Infof("performing ldap search %s %s %s", + req.BaseDN, scopeString(req.Scope), req.Filter) + resp, err := conn.Search(req) + if err != nil { + return fmt.Errorf("ldap: search failed: %v", err) + } + 
gotGroups = len(resp.Entries) != 0 + groups = append(groups, resp.Entries...) + return nil + }); err != nil { + return nil, err + } + if !gotGroups { + // TODO(ericchiang): Is this going to spam the logs? + c.logger.Errorf("ldap: groups search with filter %q returned no groups", filter) + } + } + } + + groupNames := make([]string, 0, len(groups)) + for _, group := range groups { + name := c.getAttr(*group, c.GroupSearch.NameAttr) + if name == "" { + // Be obnoxious about missing attributes. If the group entry is + // missing its name attribute, that indicates a misconfiguration. + // + // In the future we can add configuration options to just log these errors. + return nil, fmt.Errorf("ldap: group entity %q missing required attribute %q", + group.DN, c.GroupSearch.NameAttr) + } + + groupNames = append(groupNames, name) + } + return groupNames, nil +} + +func (c *ldapConnector) Prompt() string { + return c.UsernamePrompt +} diff --git a/vendor/github.com/dexidp/dex/connector/linkedin/BUILD b/vendor/github.com/dexidp/dex/connector/linkedin/BUILD new file mode 100644 index 00000000..7061c795 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/linkedin/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "linkedin", + srcs = ["linkedin.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/linkedin", + importpath = "github.com/dexidp/dex/connector/linkedin", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go b/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go new file mode 100644 index 00000000..f79f1c49 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go @@ -0,0 +1,242 @@ +// Package linkedin provides authentication strategies using LinkedIn +package linkedin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/log" +) + +const ( + apiURL = "https://api.linkedin.com/v2" + authURL = "https://www.linkedin.com/oauth/v2/authorization" + tokenURL = "https://www.linkedin.com/oauth/v2/accessToken" +) + +// Config holds configuration options for LinkedIn logins. +type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` +} + +// Open returns a strategy for logging in through LinkedIn +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + return &linkedInConnector{ + oauth2Config: &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + Endpoint: oauth2.Endpoint{ + AuthURL: authURL, + TokenURL: tokenURL, + }, + Scopes: []string{"r_liteprofile", "r_emailaddress"}, + RedirectURL: c.RedirectURI, + }, + logger: logger, + }, nil +} + +type connectorData struct { + AccessToken string `json:"accessToken"` +} + +type linkedInConnector struct { + oauth2Config *oauth2.Config + logger log.Logger +} + +// LinkedIn doesn't provide refresh tokens, so refresh tokens issued by Dex +// will expire in 60 days (default LinkedIn token lifetime). 
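+// The Refresh implementation below therefore replays the stored upstream
+// access token rather than performing a refresh grant.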
+var ( + _ connector.CallbackConnector = (*linkedInConnector)(nil) + _ connector.RefreshConnector = (*linkedInConnector)(nil) +) + +// LoginURL returns an access token request URL +func (c *linkedInConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.oauth2Config.RedirectURL != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", + callbackURL, c.oauth2Config.RedirectURL) + } + + return c.oauth2Config.AuthCodeURL(state), nil +} + +// HandleCallback handles HTTP redirect from LinkedIn +func (c *linkedInConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + ctx := r.Context() + token, err := c.oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("linkedin: get token: %v", err) + } + + client := c.oauth2Config.Client(ctx, token) + profile, err := c.profile(ctx, client) + if err != nil { + return identity, fmt.Errorf("linkedin: get profile: %v", err) + } + + identity = connector.Identity{ + UserID: profile.ID, + Username: profile.fullname(), + Email: profile.Email, + EmailVerified: true, + } + + if s.OfflineAccess { + data := connectorData{AccessToken: token.AccessToken} + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("linkedin: marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +func (c *linkedInConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { + if len(ident.ConnectorData) == 0 { + return ident, fmt.Errorf("linkedin: no upstream access token found") + } + + var data connectorData + if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { + return ident, fmt.Errorf("linkedin: unmarshal access token: %v", err) + } + + client := c.oauth2Config.Client(ctx, &oauth2.Token{AccessToken: data.AccessToken}) + profile, err := c.profile(ctx, client) + if err != nil { + return ident, fmt.Errorf("linkedin: get profile: %v", err) + } + + ident.Username = profile.fullname() + ident.Email = profile.Email + + return ident, nil +} + +type profile struct { + ID string `json:"id"` + FirstName string `json:"localizedFirstName"` + LastName string `json:"localizedLastName"` + Email string `json:"emailAddress"` +} + +type emailresp struct { + Elements []struct { + Handle struct { + EmailAddress string `json:"emailAddress"` + } `json:"handle~"` + } `json:"elements"` +} + +// fullname returns a full name of a person, or email if the resulting name is +// empty +func (p profile) fullname() string { + fname := strings.TrimSpace(p.FirstName + " " + p.LastName) + if fname == "" { + return p.Email + } + + return fname +} + +func (c *linkedInConnector) primaryEmail(ctx context.Context, client *http.Client) (email string, err error) { + req, err := http.NewRequest("GET", apiURL+"/emailAddress?q=members&projection=(elements*(handle~))", nil) + if err != nil { + return email, fmt.Errorf("new req: %v", err) + } + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return email, fmt.Errorf("get URL %v", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return email, fmt.Errorf("read body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return email, fmt.Errorf("%s: %s", 
resp.Status, body) + } + + var parsedResp emailresp + err = json.Unmarshal(body, &parsedResp) + if err == nil { + for _, elem := range parsedResp.Elements { + email = elem.Handle.EmailAddress + } + } + + if email == "" { + err = fmt.Errorf("email is not set") + } + + return email, err +} + +func (c *linkedInConnector) profile(ctx context.Context, client *http.Client) (p profile, err error) { + // https://docs.microsoft.com/en-us/linkedin/shared/integrations/people/profile-api + // https://docs.microsoft.com/en-us/linkedin/shared/integrations/people/primary-contact-api + // https://docs.microsoft.com/en-us/linkedin/consumer/integrations/self-serve/migration-faq#how-do-i-retrieve-the-members-email-address + req, err := http.NewRequest("GET", apiURL+"/me", nil) + if err != nil { + return p, fmt.Errorf("new req: %v", err) + } + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return p, fmt.Errorf("get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return p, fmt.Errorf("read body: %v", err) + } + return p, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(&p); err != nil { + return p, fmt.Errorf("JSON decode: %v", err) + } + + email, err := c.primaryEmail(ctx, client) + if err != nil { + return p, fmt.Errorf("fetching email: %v", err) + } + p.Email = email + + return p, err +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} diff --git a/vendor/github.com/dexidp/dex/connector/microsoft/BUILD b/vendor/github.com/dexidp/dex/connector/microsoft/BUILD new file mode 100644 index 00000000..8aa22092 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/microsoft/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "microsoft", + srcs = ["microsoft.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/microsoft", + importpath = "github.com/dexidp/dex/connector/microsoft", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go b/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go new file mode 100644 index 00000000..719b92de --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go @@ -0,0 +1,521 @@ +// Package microsoft provides authentication strategies using Microsoft. 
+package microsoft + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + groups_pkg "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/log" +) + +// GroupNameFormat represents the format of the group identifier +// we use type of string instead of int because it's easier to +// marshall/unmarshall +type GroupNameFormat string + +// Possible values for GroupNameFormat +const ( + GroupID GroupNameFormat = "id" + GroupName GroupNameFormat = "name" +) + +const ( + // Microsoft requires this scope to access user's profile + scopeUser = "user.read" + // Microsoft requires this scope to list groups the user is a member of + // and resolve their ids to groups names. + scopeGroups = "directory.read.all" + // Microsoft requires this scope to return a refresh token + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-permissions-and-consent#offline_access + scopeOfflineAccess = "offline_access" +) + +// Config holds configuration options for microsoft logins. +type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Tenant string `json:"tenant"` + OnlySecurityGroups bool `json:"onlySecurityGroups"` + Groups []string `json:"groups"` + GroupNameFormat GroupNameFormat `json:"groupNameFormat"` + UseGroupsAsWhitelist bool `json:"useGroupsAsWhitelist"` + EmailToLowercase bool `json:"emailToLowercase"` + + // PromptType is used for the prompt query parameter. + // For valid values, see https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-auth-code-flow#request-an-authorization-code. + PromptType string `json:"promptType"` + DomainHint string `json:"domainHint"` + + Scopes []string `json:"scopes"` // defaults to scopeUser (user.read) +} + +// Open returns a strategy for logging in through Microsoft. +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + m := microsoftConnector{ + apiURL: "https://login.microsoftonline.com", + graphURL: "https://graph.microsoft.com", + redirectURI: c.RedirectURI, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + tenant: c.Tenant, + onlySecurityGroups: c.OnlySecurityGroups, + groups: c.Groups, + groupNameFormat: c.GroupNameFormat, + useGroupsAsWhitelist: c.UseGroupsAsWhitelist, + logger: logger, + emailToLowercase: c.EmailToLowercase, + promptType: c.PromptType, + domainHint: c.DomainHint, + scopes: c.Scopes, + } + // By default allow logins from both personal and business/school + // accounts. 
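+	// ("common" is the multi-tenant endpoint that accepts both account types;
+	// a concrete tenant ID or domain restricts sign-ins to that directory.)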
+ if m.tenant == "" { + m.tenant = "common" + } + + // By default, use group names + switch m.groupNameFormat { + case "": + m.groupNameFormat = GroupName + case GroupID, GroupName: + default: + return nil, fmt.Errorf("invalid groupNameFormat: %s", m.groupNameFormat) + } + + return &m, nil +} + +type connectorData struct { + AccessToken string `json:"accessToken"` + RefreshToken string `json:"refreshToken"` + Expiry time.Time `json:"expiry"` +} + +var ( + _ connector.CallbackConnector = (*microsoftConnector)(nil) + _ connector.RefreshConnector = (*microsoftConnector)(nil) +) + +type microsoftConnector struct { + apiURL string + graphURL string + redirectURI string + clientID string + clientSecret string + tenant string + onlySecurityGroups bool + groupNameFormat GroupNameFormat + groups []string + useGroupsAsWhitelist bool + logger log.Logger + emailToLowercase bool + promptType string + domainHint string + scopes []string +} + +func (c *microsoftConnector) isOrgTenant() bool { + return c.tenant != "common" && c.tenant != "consumers" && c.tenant != "organizations" +} + +func (c *microsoftConnector) groupsRequired(groupScope bool) bool { + return (len(c.groups) > 0 || groupScope) && c.isOrgTenant() +} + +func (c *microsoftConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { + var microsoftScopes []string + if len(c.scopes) > 0 { + microsoftScopes = c.scopes + } else { + microsoftScopes = append(microsoftScopes, scopeUser) + } + if c.groupsRequired(scopes.Groups) { + microsoftScopes = append(microsoftScopes, scopeGroups) + } + + if scopes.OfflineAccess { + microsoftScopes = append(microsoftScopes, scopeOfflineAccess) + } + + return &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: oauth2.Endpoint{ + AuthURL: c.apiURL + "/" + c.tenant + "/oauth2/v2.0/authorize", + TokenURL: c.apiURL + "/" + c.tenant + "/oauth2/v2.0/token", + }, + Scopes: microsoftScopes, + RedirectURL: c.redirectURI, + } +} + +func (c *microsoftConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + } + + var options []oauth2.AuthCodeOption + if c.promptType != "" { + options = append(options, oauth2.SetAuthURLParam("prompt", c.promptType)) + } + if c.domainHint != "" { + options = append(options, oauth2.SetAuthURLParam("domain_hint", c.domainHint)) + } + + return c.oauth2Config(scopes).AuthCodeURL(state, options...), nil +} + +func (c *microsoftConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + oauth2Config := c.oauth2Config(s) + + ctx := r.Context() + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("microsoft: failed to get token: %v", err) + } + + client := oauth2Config.Client(ctx, token) + + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("microsoft: get user: %v", err) + } + + if c.emailToLowercase { + user.Email = strings.ToLower(user.Email) + } + + identity = connector.Identity{ + UserID: user.ID, + Username: user.Name, + Email: user.Email, + EmailVerified: true, + } + + if c.groupsRequired(s.Groups) { + groups, err := c.getGroups(ctx, client, user.ID) + if err != nil { + return identity, 
fmt.Errorf("microsoft: get groups: %v", err) + } + identity.Groups = groups + } + + if s.OfflineAccess { + data := connectorData{ + AccessToken: token.AccessToken, + RefreshToken: token.RefreshToken, + Expiry: token.Expiry, + } + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("microsoft: marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +type tokenNotifyFunc func(*oauth2.Token) error + +// notifyRefreshTokenSource is essentially `oauth2.ReuseTokenSource` with `TokenNotifyFunc` added. +type notifyRefreshTokenSource struct { + new oauth2.TokenSource + mu sync.Mutex // guards t + t *oauth2.Token + f tokenNotifyFunc // called when token refreshed so new refresh token can be persisted +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *notifyRefreshTokenSource) Token() (*oauth2.Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, s.f(t) +} + +func (c *microsoftConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + if len(identity.ConnectorData) == 0 { + return identity, errors.New("microsoft: no upstream access token found") + } + + var data connectorData + if err := json.Unmarshal(identity.ConnectorData, &data); err != nil { + return identity, fmt.Errorf("microsoft: unmarshal access token: %v", err) + } + tok := &oauth2.Token{ + AccessToken: data.AccessToken, + RefreshToken: data.RefreshToken, + Expiry: data.Expiry, + } + + client := oauth2.NewClient(ctx, ¬ifyRefreshTokenSource{ + new: c.oauth2Config(s).TokenSource(ctx, tok), + t: tok, + f: func(tok *oauth2.Token) error { + data := connectorData{ + AccessToken: tok.AccessToken, + RefreshToken: tok.RefreshToken, + Expiry: tok.Expiry, + } + connData, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("microsoft: marshal connector data: %v", err) + } + identity.ConnectorData = connData + return nil + }, + }) + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("microsoft: get user: %v", err) + } + + identity.Username = user.Name + identity.Email = user.Email + + if c.groupsRequired(s.Groups) { + groups, err := c.getGroups(ctx, client, user.ID) + if err != nil { + return identity, fmt.Errorf("microsoft: get groups: %v", err) + } + identity.Groups = groups + } + + return identity, nil +} + +// https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/user +// id - The unique identifier for the user. Inherited from +// +// directoryObject. Key. Not nullable. Read-only. +// +// displayName - The name displayed in the address book for the user. +// +// This is usually the combination of the user's first name, +// middle initial and last name. This property is required +// when a user is created and it cannot be cleared during +// updates. Supports $filter and $orderby. +// +// userPrincipalName - The user principal name (UPN) of the user. +// +// The UPN is an Internet-style login name for the user +// based on the Internet standard RFC 822. By convention, +// this should map to the user's email name. The general +// format is alias@domain, where domain must be present in +// the tenant’s collection of verified domains. This +// property is required when a user is created. 
The +// verified domains for the tenant can be accessed from the +// verifiedDomains property of organization. Supports +// $filter and $orderby. +type user struct { + ID string `json:"id"` + Name string `json:"displayName"` + Email string `json:"userPrincipalName"` +} + +func (c *microsoftConnector) user(ctx context.Context, client *http.Client) (u user, err error) { + // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/user_get + req, err := http.NewRequest("GET", c.graphURL+"/v1.0/me?$select=id,displayName,userPrincipalName", nil) + if err != nil { + return u, fmt.Errorf("new req: %v", err) + } + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return u, fmt.Errorf("get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return u, newGraphError(resp.Body) + } + + if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { + return u, fmt.Errorf("JSON decode: %v", err) + } + + return u, err +} + +// https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/group +// displayName - The display name for the group. This property is required when +// +// a group is created and it cannot be cleared during updates. +// Supports $filter and $orderby. +type group struct { + Name string `json:"displayName"` +} + +func (c *microsoftConnector) getGroups(ctx context.Context, client *http.Client, userID string) ([]string, error) { + userGroups, err := c.getGroupIDs(ctx, client) + if err != nil { + return nil, err + } + + if c.groupNameFormat == GroupName { + userGroups, err = c.getGroupNames(ctx, client, userGroups) + if err != nil { + return nil, err + } + } + + // ensure that the user is in at least one required group + filteredGroups := groups_pkg.Filter(userGroups, c.groups) + if len(c.groups) > 0 && len(filteredGroups) == 0 { + return nil, fmt.Errorf("microsoft: user %v not in any of the required groups", userID) + } else if c.useGroupsAsWhitelist { + return filteredGroups, nil + } + + return userGroups, nil +} + +func (c *microsoftConnector) getGroupIDs(ctx context.Context, client *http.Client) (ids []string, err error) { + // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/user_getmembergroups + in := &struct { + SecurityEnabledOnly bool `json:"securityEnabledOnly"` + }{c.onlySecurityGroups} + reqURL := c.graphURL + "/v1.0/me/getMemberGroups" + for { + var out []string + var next string + + next, err = c.post(ctx, client, reqURL, in, &out) + if err != nil { + return ids, err + } + + ids = append(ids, out...) 
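+ // Graph pages the results: c.post returns the @odata.nextLink URL of the following page, and an empty value means the final page has been fetched.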
+ if next == "" { + return + } + reqURL = next + } +} + +func (c *microsoftConnector) getGroupNames(ctx context.Context, client *http.Client, ids []string) (groups []string, err error) { + if len(ids) == 0 { + return + } + + // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/directoryobject_getbyids + in := &struct { + IDs []string `json:"ids"` + Types []string `json:"types"` + }{ids, []string{"group"}} + reqURL := c.graphURL + "/v1.0/directoryObjects/getByIds" + for { + var out []group + var next string + + next, err = c.post(ctx, client, reqURL, in, &out) + if err != nil { + return groups, err + } + + for _, g := range out { + groups = append(groups, g.Name) + } + if next == "" { + return + } + reqURL = next + } +} + +func (c *microsoftConnector) post(ctx context.Context, client *http.Client, reqURL string, in interface{}, out interface{}) (string, error) { + var payload bytes.Buffer + + err := json.NewEncoder(&payload).Encode(in) + if err != nil { + return "", fmt.Errorf("microsoft: JSON encode: %v", err) + } + + req, err := http.NewRequest("POST", reqURL, &payload) + if err != nil { + return "", fmt.Errorf("new req: %v", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return "", fmt.Errorf("post URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", newGraphError(resp.Body) + } + + var next string + if err = json.NewDecoder(resp.Body).Decode(&struct { + NextLink *string `json:"@odata.nextLink"` + Value interface{} `json:"value"` + }{&next, out}); err != nil { + return "", fmt.Errorf("JSON decode: %v", err) + } + + return next, nil +} + +type graphError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (e *graphError) Error() string { + return e.Code + ": " + e.Message +} + +func newGraphError(r io.Reader) error { + // https://developer.microsoft.com/en-us/graph/docs/concepts/errors + var ge graphError + if err := json.NewDecoder(r).Decode(&struct { + Error *graphError `json:"error"` + }{&ge}); err != nil { + return fmt.Errorf("JSON error decode: %v", err) + } + return &ge +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} diff --git a/vendor/github.com/dexidp/dex/connector/mock/BUILD b/vendor/github.com/dexidp/dex/connector/mock/BUILD new file mode 100644 index 00000000..df8804dc --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/mock/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "mock", + srcs = ["connectortest.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/mock", + importpath = "github.com/dexidp/dex/connector/mock", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/log", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/mock/connectortest.go b/vendor/github.com/dexidp/dex/connector/mock/connectortest.go new file mode 100644 index 00000000..e97f9865 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/mock/connectortest.go @@ -0,0 +1,124 @@ +// Package mock implements connectors which help test various server components. 
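+// The Callback connector always returns a fixed identity, and passwordConnector accepts a single static username/password pair.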
+package mock + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/log" +) + +// NewCallbackConnector returns a mock connector which requires no user interaction. It always returns +// the same (fake) identity. +func NewCallbackConnector(logger log.Logger) connector.Connector { + return &Callback{ + Identity: connector.Identity{ + UserID: "0-385-28089-0", + Username: "Kilgore Trout", + Email: "kilgore@kilgore.trout", + EmailVerified: true, + Groups: []string{"authors"}, + ConnectorData: connectorData, + }, + Logger: logger, + } +} + +var ( + _ connector.CallbackConnector = &Callback{} + + _ connector.PasswordConnector = passwordConnector{} + _ connector.RefreshConnector = passwordConnector{} +) + +// Callback is a connector that requires no user interaction and always returns the same identity. +type Callback struct { + // The returned identity. + Identity connector.Identity + Logger log.Logger +} + +// LoginURL returns the URL to redirect the user to login with. +func (m *Callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { + u, err := url.Parse(callbackURL) + if err != nil { + return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) + } + v := u.Query() + v.Set("state", state) + u.RawQuery = v.Encode() + return u.String(), nil +} + +var connectorData = []byte("foobar") + +// HandleCallback parses the request and returns the user's identity +func (m *Callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) { + return m.Identity, nil +} + +// Refresh updates the identity during a refresh token request. +func (m *Callback) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + return m.Identity, nil +} + +func (m *Callback) TokenIdentity(ctx context.Context, subjectTokenType, subjectToken string) (connector.Identity, error) { + return m.Identity, nil +} + +// CallbackConfig holds the configuration parameters for a connector which requires no interaction. +type CallbackConfig struct{} + +// Open returns an authentication strategy which requires no user interaction. +func (c *CallbackConfig) Open(id string, logger log.Logger) (connector.Connector, error) { + return NewCallbackConnector(logger), nil +} + +// PasswordConfig holds the configuration for a mock connector which prompts for the supplied +// username and password. +type PasswordConfig struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// Open returns an authentication strategy which prompts for a predefined username and password. 
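+// A minimal usage sketch (hypothetical wiring; ctx and logger come from the caller): +// +// conn, _ := (&PasswordConfig{Username: "kilgore", Password: "trout"}).Open("mock", logger) +// pc := conn.(connector.PasswordConnector) +// id, ok, _ := pc.Login(ctx, connector.Scopes{}, "kilgore", "trout") // ok reports whether the password matched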
+func (c *PasswordConfig) Open(id string, logger log.Logger) (connector.Connector, error) { + if c.Username == "" { + return nil, errors.New("no username supplied") + } + if c.Password == "" { + return nil, errors.New("no password supplied") + } + return &passwordConnector{c.Username, c.Password, logger}, nil +} + +type passwordConnector struct { + username string + password string + logger log.Logger +} + +func (p passwordConnector) Close() error { return nil } + +func (p passwordConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (identity connector.Identity, validPassword bool, err error) { + if username == p.username && password == p.password { + return connector.Identity{ + UserID: "0-385-28089-0", + Username: "Kilgore Trout", + Email: "kilgore@kilgore.trout", + EmailVerified: true, + ConnectorData: []byte(`{"test": "true"}`), + }, true, nil + } + return identity, false, nil +} + +func (p passwordConnector) Prompt() string { return "" } + +func (p passwordConnector) Refresh(_ context.Context, _ connector.Scopes, identity connector.Identity) (connector.Identity, error) { + return identity, nil +} diff --git a/vendor/github.com/dexidp/dex/connector/oauth/BUILD b/vendor/github.com/dexidp/dex/connector/oauth/BUILD new file mode 100644 index 00000000..524a91bb --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/oauth/BUILD @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "oauth", + srcs = ["oauth.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/oauth", + importpath = "github.com/dexidp/dex/connector/oauth", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/httpclient", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/oauth/oauth.go b/vendor/github.com/dexidp/dex/connector/oauth/oauth.go new file mode 100644 index 00000000..2fe39fd4 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/oauth/oauth.go @@ -0,0 +1,260 @@ +package oauth + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/httpclient" + "github.com/dexidp/dex/pkg/log" +) + +type oauthConnector struct { + clientID string + clientSecret string + redirectURI string + tokenURL string + authorizationURL string + userInfoURL string + scopes []string + userIDKey string + userNameKey string + preferredUsernameKey string + emailKey string + emailVerifiedKey string + groupsKey string + httpClient *http.Client + logger log.Logger +} + +type connectorData struct { + AccessToken string +} + +type Config struct { + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + TokenURL string `json:"tokenURL"` + AuthorizationURL string `json:"authorizationURL"` + UserInfoURL string `json:"userInfoURL"` + Scopes []string `json:"scopes"` + RootCAs []string `json:"rootCAs"` + InsecureSkipVerify bool `json:"insecureSkipVerify"` + UserIDKey string `json:"userIDKey"` // defaults to "id" + ClaimMapping struct { + UserNameKey string `json:"userNameKey"` // defaults to "user_name" + PreferredUsernameKey string `json:"preferredUsernameKey"` // defaults to "preferred_username" + GroupsKey string `json:"groupsKey"` // defaults to "groups" + EmailKey string 
`json:"emailKey"` // defaults to "email" + EmailVerifiedKey string `json:"emailVerifiedKey"` // defaults to "email_verified" + } `json:"claimMapping"` +} + +func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + var err error + + userIDKey := c.UserIDKey + if userIDKey == "" { + userIDKey = "id" + } + + userNameKey := c.ClaimMapping.UserNameKey + if userNameKey == "" { + userNameKey = "user_name" + } + + preferredUsernameKey := c.ClaimMapping.PreferredUsernameKey + if preferredUsernameKey == "" { + preferredUsernameKey = "preferred_username" + } + + groupsKey := c.ClaimMapping.GroupsKey + if groupsKey == "" { + groupsKey = "groups" + } + + emailKey := c.ClaimMapping.EmailKey + if emailKey == "" { + emailKey = "email" + } + + emailVerifiedKey := c.ClaimMapping.EmailVerifiedKey + if emailVerifiedKey == "" { + emailVerifiedKey = "email_verified" + } + + oauthConn := &oauthConnector{ + clientID: c.ClientID, + clientSecret: c.ClientSecret, + tokenURL: c.TokenURL, + authorizationURL: c.AuthorizationURL, + userInfoURL: c.UserInfoURL, + scopes: c.Scopes, + redirectURI: c.RedirectURI, + logger: logger, + userIDKey: userIDKey, + userNameKey: userNameKey, + preferredUsernameKey: preferredUsernameKey, + groupsKey: groupsKey, + emailKey: emailKey, + emailVerifiedKey: emailVerifiedKey, + } + + oauthConn.httpClient, err = httpclient.NewHTTPClient(c.RootCAs, c.InsecureSkipVerify) + if err != nil { + return nil, err + } + + return oauthConn, err +} + +func (c *oauthConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + } + + oauth2Config := &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: oauth2.Endpoint{TokenURL: c.tokenURL, AuthURL: c.authorizationURL}, + RedirectURL: c.redirectURI, + Scopes: c.scopes, + } + + return oauth2Config.AuthCodeURL(state), nil +} + +func (c *oauthConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, errors.New(q.Get("error_description")) + } + + oauth2Config := &oauth2.Config{ + ClientID: c.clientID, + ClientSecret: c.clientSecret, + Endpoint: oauth2.Endpoint{TokenURL: c.tokenURL, AuthURL: c.authorizationURL}, + RedirectURL: c.redirectURI, + Scopes: c.scopes, + } + + ctx := context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + + token, err := oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("OAuth connector: failed to get token: %v", err) + } + + client := oauth2.NewClient(ctx, oauth2.StaticTokenSource(token)) + + userInfoResp, err := client.Get(c.userInfoURL) + if err != nil { + return identity, fmt.Errorf("OAuth Connector: failed to execute request to userinfo: %v", err) + } + defer userInfoResp.Body.Close() + + if userInfoResp.StatusCode != http.StatusOK { + return identity, fmt.Errorf("OAuth Connector: failed to execute request to userinfo: status %d", userInfoResp.StatusCode) + } + + var userInfoResult map[string]interface{} + err = json.NewDecoder(userInfoResp.Body).Decode(&userInfoResult) + if err != nil { + return identity, fmt.Errorf("OAuth Connector: failed to parse userinfo: %v", err) + } + + userID, found := userInfoResult[c.userIDKey] + if !found { + return identity, fmt.Errorf("OAuth Connector: not found %v claim", 
c.userIDKey) + } + + switch userID.(type) { + case float64, int64, string: + identity.UserID = fmt.Sprintf("%v", userID) + default: + return identity, fmt.Errorf("OAuth Connector: %v claim should be string or number, got %T", c.userIDKey, userID) + } + + identity.Username, _ = userInfoResult[c.userNameKey].(string) + identity.PreferredUsername, _ = userInfoResult[c.preferredUsernameKey].(string) + identity.Email, _ = userInfoResult[c.emailKey].(string) + identity.EmailVerified, _ = userInfoResult[c.emailVerifiedKey].(bool) + + if s.Groups { + groups := map[string]struct{}{} + + c.addGroupsFromMap(groups, userInfoResult) + c.addGroupsFromToken(groups, token.AccessToken) + + for groupName := range groups { + identity.Groups = append(identity.Groups, groupName) + } + } + + if s.OfflineAccess { + data := connectorData{AccessToken: token.AccessToken} + connData, err := json.Marshal(data) + if err != nil { + return identity, fmt.Errorf("OAuth Connector: failed to parse connector data for offline access: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +func (c *oauthConnector) addGroupsFromMap(groups map[string]struct{}, result map[string]interface{}) error { + groupsClaim, ok := result[c.groupsKey].([]interface{}) + if !ok { + return errors.New("cannot convert to slice") + } + + for _, group := range groupsClaim { + if groupString, ok := group.(string); ok { + groups[groupString] = struct{}{} + } + if groupMap, ok := group.(map[string]interface{}); ok { + if groupName, ok := groupMap["name"].(string); ok { + groups[groupName] = struct{}{} + } + } + } + + return nil +} + +func (c *oauthConnector) addGroupsFromToken(groups map[string]struct{}, token string) error { + parts := strings.Split(token, ".") + if len(parts) < 2 { + return errors.New("invalid token") + } + + decoded, err := decode(parts[1]) + if err != nil { + return err + } + + var claimsMap map[string]interface{} + err = json.Unmarshal(decoded, &claimsMap) + if err != nil { + return err + } + + return c.addGroupsFromMap(groups, claimsMap) +} + +func decode(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/dexidp/dex/connector/oidc/BUILD b/vendor/github.com/dexidp/dex/connector/oidc/BUILD new file mode 100644 index 00000000..543b0bc9 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/oidc/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "oidc", + srcs = ["oidc.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/oidc", + importpath = "github.com/dexidp/dex/connector/oidc", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/go-oidc/v3/oidc", + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/httpclient", + "//vendor/github.com/dexidp/dex/pkg/log", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/oidc/oidc.go b/vendor/github.com/dexidp/dex/connector/oidc/oidc.go new file mode 100644 index 00000000..ff4713c2 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/oidc/oidc.go @@ -0,0 +1,458 @@ +// Package oidc implements logging in through OpenID Connect providers. 
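+// The returned connector implements connector.CallbackConnector and connector.RefreshConnector, and additionally supports token exchange via TokenIdentity.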
+package oidc + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/httpclient" + "github.com/dexidp/dex/pkg/log" +) + +// Config holds configuration options for OpenID Connect logins. +type Config struct { + Issuer string `json:"issuer"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + + // Causes client_secret to be passed as POST parameters instead of basic + // auth. This is specifically "NOT RECOMMENDED" by the OAuth2 RFC, but some + // providers require it. + // + // https://tools.ietf.org/html/rfc6749#section-2.3.1 + BasicAuthUnsupported *bool `json:"basicAuthUnsupported"` + + Scopes []string `json:"scopes"` // defaults to "profile" and "email" + + // HostedDomains was an optional list of whitelisted domains when using the OIDC connector with Google. + // Only users from a whitelisted domain were allowed to log in. + // Support for this option was removed from the OIDC connector. + // Consider switching to the Google connector, which supports this option. + // + // Deprecated: will be removed in future releases. + HostedDomains []string `json:"hostedDomains"` + + // Certificates for SSL validation + RootCAs []string `json:"rootCAs"` + + // Override the value of email_verified to true in the returned claims + InsecureSkipEmailVerified bool `json:"insecureSkipEmailVerified"` + + // InsecureEnableGroups enables groups claims. This is disabled by default until https://github.com/dexidp/dex/issues/1065 is resolved. + InsecureEnableGroups bool `json:"insecureEnableGroups"` + + // AcrValues (Authentication Context Class Reference Values) specifies the Authentication Context Class Values + // within the Authentication Request that the Authorization Server is being requested to use for + // processing requests from this Client, with the values appearing in order of preference. + AcrValues []string `json:"acrValues"` + + // Disable certificate verification + InsecureSkipVerify bool `json:"insecureSkipVerify"` + + // GetUserInfo uses the userinfo endpoint to get additional claims for + // the token. This is especially useful where upstreams return "thin" + // ID tokens. + GetUserInfo bool `json:"getUserInfo"` + + UserIDKey string `json:"userIDKey"` + + UserNameKey string `json:"userNameKey"` + + // PromptType will be used for the prompt parameter (when offline_access is requested, prompt defaults to "consent") + PromptType string `json:"promptType"` + + // OverrideClaimMapping will be used to override the options defined in claimMapping. + // e.g. if there are 'email' and `preferred_email` claims available, by default Dex will always use the `email` claim independent of the ClaimMapping.EmailKey. + // This setting allows you to override the default behavior of Dex and enforce the mappings defined in `claimMapping`.
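+ // For example (hypothetical claims): with this set to true and claimMapping.email set to "custom_email", the custom_email claim is used even when a standard email claim is also present.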
+ OverrideClaimMapping bool `json:"overrideClaimMapping"` // defaults to false + + ClaimMapping struct { + // Configurable key which contains the preferred username claims + PreferredUsernameKey string `json:"preferred_username"` // defaults to "preferred_username" + + // Configurable key which contains the email claims + EmailKey string `json:"email"` // defaults to "email" + + // Configurable key which contains the groups claims + GroupsKey string `json:"groups"` // defaults to "groups" + } `json:"claimMapping"` +} + +// Domains that don't support basic auth. golang.org/x/oauth2 has an internal +// list, but it only matches specific URLs, not top level domains. +var brokenAuthHeaderDomains = []string{ + // See: https://github.com/dexidp/dex/issues/859 + "okta.com", + "oktapreview.com", +} + +// connectorData stores information for sessions authenticated by this connector +type connectorData struct { + RefreshToken []byte +} + +// Detect auth header provider issues for known providers. This lets users +// avoid having to explicitly set "basicAuthUnsupported" in their config. +// +// Setting the config field always overrides values returned by this function. +func knownBrokenAuthHeaderProvider(issuerURL string) bool { + if u, err := url.Parse(issuerURL); err == nil { + for _, host := range brokenAuthHeaderDomains { + if u.Host == host || strings.HasSuffix(u.Host, "."+host) { + return true + } + } + } + return false +} + +// Open returns a connector which can be used to login users through an upstream +// OpenID Connect provider. +func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { + if len(c.HostedDomains) > 0 { + return nil, fmt.Errorf("support for the Hosted domains option had been deprecated and removed, consider switching to the Google connector") + } + + httpClient, err := httpclient.NewHTTPClient(c.RootCAs, c.InsecureSkipVerify) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + provider, err := oidc.NewProvider(ctx, c.Issuer) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to get provider: %v", err) + } + + endpoint := provider.Endpoint() + + if c.BasicAuthUnsupported != nil { + // Setting "basicAuthUnsupported" always overrides our detection. + if *c.BasicAuthUnsupported { + endpoint.AuthStyle = oauth2.AuthStyleInParams + } + } else if knownBrokenAuthHeaderProvider(c.Issuer) { + endpoint.AuthStyle = oauth2.AuthStyleInParams + } + + scopes := []string{oidc.ScopeOpenID} + if len(c.Scopes) > 0 { + scopes = append(scopes, c.Scopes...) 
+ } else { + scopes = append(scopes, "profile", "email") + } + + // PromptType should be "consent" by default, if not set + if c.PromptType == "" { + c.PromptType = "consent" + } + + clientID := c.ClientID + return &oidcConnector{ + provider: provider, + redirectURI: c.RedirectURI, + oauth2Config: &oauth2.Config{ + ClientID: clientID, + ClientSecret: c.ClientSecret, + Endpoint: endpoint, + Scopes: scopes, + RedirectURL: c.RedirectURI, + }, + verifier: provider.Verifier( + &oidc.Config{ClientID: clientID}, + ), + logger: logger, + cancel: cancel, + httpClient: httpClient, + insecureSkipEmailVerified: c.InsecureSkipEmailVerified, + insecureEnableGroups: c.InsecureEnableGroups, + acrValues: c.AcrValues, + getUserInfo: c.GetUserInfo, + promptType: c.PromptType, + userIDKey: c.UserIDKey, + userNameKey: c.UserNameKey, + overrideClaimMapping: c.OverrideClaimMapping, + preferredUsernameKey: c.ClaimMapping.PreferredUsernameKey, + emailKey: c.ClaimMapping.EmailKey, + groupsKey: c.ClaimMapping.GroupsKey, + }, nil +} + +var ( + _ connector.CallbackConnector = (*oidcConnector)(nil) + _ connector.RefreshConnector = (*oidcConnector)(nil) +) + +type oidcConnector struct { + provider *oidc.Provider + redirectURI string + oauth2Config *oauth2.Config + verifier *oidc.IDTokenVerifier + cancel context.CancelFunc + logger log.Logger + httpClient *http.Client + insecureSkipEmailVerified bool + insecureEnableGroups bool + acrValues []string + getUserInfo bool + promptType string + userIDKey string + userNameKey string + overrideClaimMapping bool + preferredUsernameKey string + emailKey string + groupsKey string +} + +func (c *oidcConnector) Close() error { + c.cancel() + return nil +} + +func (c *oidcConnector) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) + } + + var opts []oauth2.AuthCodeOption + + if len(c.acrValues) > 0 { + acrValues := strings.Join(c.acrValues, " ") + opts = append(opts, oauth2.SetAuthURLParam("acr_values", acrValues)) + } + + if s.OfflineAccess { + opts = append(opts, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", c.promptType)) + } + return c.oauth2Config.AuthCodeURL(state, opts...), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +type caller uint + +const ( + createCaller caller = iota + refreshCaller + exchangeCaller +) + +func (c *oidcConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + ctx := context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + + token, err := c.oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("oidc: failed to get token: %v", err) + } + return c.createIdentity(ctx, identity, token, createCaller) +} + +// Refresh is used to refresh a session with the refresh token provided by the IdP +func (c *oidcConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + cd := connectorData{} + err := json.Unmarshal(identity.ConnectorData, &cd) + if err != nil { + return identity, fmt.Errorf("oidc: failed to 
unmarshal connector data: %v", err) + } + + ctx = context.WithValue(ctx, oauth2.HTTPClient, c.httpClient) + + t := &oauth2.Token{ + RefreshToken: string(cd.RefreshToken), + Expiry: time.Now().Add(-time.Hour), + } + token, err := c.oauth2Config.TokenSource(ctx, t).Token() + if err != nil { + return identity, fmt.Errorf("oidc: failed to get refresh token: %v", err) + } + return c.createIdentity(ctx, identity, token, refreshCaller) +} + +func (c *oidcConnector) TokenIdentity(ctx context.Context, subjectTokenType, subjectToken string) (connector.Identity, error) { + var identity connector.Identity + token := &oauth2.Token{ + AccessToken: subjectToken, + TokenType: subjectTokenType, + } + return c.createIdentity(ctx, identity, token, exchangeCaller) +} + +func (c *oidcConnector) createIdentity(ctx context.Context, identity connector.Identity, token *oauth2.Token, caller caller) (connector.Identity, error) { + var claims map[string]interface{} + + if rawIDToken, ok := token.Extra("id_token").(string); ok { + idToken, err := c.verifier.Verify(ctx, rawIDToken) + if err != nil { + return identity, fmt.Errorf("oidc: failed to verify ID Token: %v", err) + } + + if err := idToken.Claims(&claims); err != nil { + return identity, fmt.Errorf("oidc: failed to decode claims: %v", err) + } + } else if caller == exchangeCaller { + switch token.TokenType { + case "urn:ietf:params:oauth:token-type:id_token": + // Verify only works on ID tokens + idToken, err := c.provider.Verifier(&oidc.Config{SkipClientIDCheck: true}).Verify(ctx, token.AccessToken) + if err != nil { + return identity, fmt.Errorf("oidc: failed to verify token: %v", err) + } + if err := idToken.Claims(&claims); err != nil { + return identity, fmt.Errorf("oidc: failed to decode claims: %v", err) + } + case "urn:ietf:params:oauth:token-type:access_token": + if !c.getUserInfo { + return identity, fmt.Errorf("oidc: getUserInfo is required for access token exchange") + } + default: + return identity, fmt.Errorf("unknown token type for token exchange: %s", token.TokenType) + } + } else if caller != refreshCaller { + // ID tokens aren't mandatory in the reply when using a refresh_token grant + return identity, errors.New("oidc: no id_token in token response") + } + + // We immediately want to run getUserInfo if configured before we validate the claims. + // For token exchanges with access tokens, this is how we verify the token. 
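+ // Note: userInfo.Claims decodes into the same claims map populated above, so userinfo values overwrite any matching keys taken from the ID token.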
+ if c.getUserInfo { + userInfo, err := c.provider.UserInfo(ctx, oauth2.StaticTokenSource(token)) + if err != nil { + return identity, fmt.Errorf("oidc: error loading userinfo: %v", err) + } + if err := userInfo.Claims(&claims); err != nil { + return identity, fmt.Errorf("oidc: failed to decode userinfo claims: %v", err) + } + } + + const subjectClaimKey = "sub" + subject, found := claims[subjectClaimKey].(string) + if !found { + return identity, fmt.Errorf("missing \"%s\" claim", subjectClaimKey) + } + + userNameKey := "name" + if c.userNameKey != "" { + userNameKey = c.userNameKey + } + name, found := claims[userNameKey].(string) + if !found { + return identity, fmt.Errorf("missing \"%s\" claim", userNameKey) + } + + preferredUsername, found := claims["preferred_username"].(string) + if (!found || c.overrideClaimMapping) && c.preferredUsernameKey != "" { + preferredUsername, _ = claims[c.preferredUsernameKey].(string) + } + + hasEmailScope := false + for _, s := range c.oauth2Config.Scopes { + if s == "email" { + hasEmailScope = true + break + } + } + + var email string + emailKey := "email" + email, found = claims[emailKey].(string) + if (!found || c.overrideClaimMapping) && c.emailKey != "" { + emailKey = c.emailKey + email, found = claims[emailKey].(string) + } + + if !found && hasEmailScope { + return identity, fmt.Errorf("missing email claim, not found \"%s\" key", emailKey) + } + + emailVerified, found := claims["email_verified"].(bool) + if !found { + if c.insecureSkipEmailVerified { + emailVerified = true + } else if hasEmailScope { + return identity, errors.New("missing \"email_verified\" claim") + } + } + + var groups []string + if c.insecureEnableGroups { + groupsKey := "groups" + vs, found := claims[groupsKey].([]interface{}) + if (!found || c.overrideClaimMapping) && c.groupsKey != "" { + groupsKey = c.groupsKey + vs, found = claims[groupsKey].([]interface{}) + } + + // Fallback when claims[groupsKey] is a string instead of an array of strings. 
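+ // (A bare string fails the []interface{} assertion above, so found stays false and the loop below is skipped.)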
+ if g, b := claims[groupsKey].(string); b { + groups = []string{g} + } + + if found { + for _, v := range vs { + if s, ok := v.(string); ok { + groups = append(groups, s) + } else { + return identity, fmt.Errorf("malformed \"%v\" claim", groupsKey) + } + } + } + } + + cd := connectorData{ + RefreshToken: []byte(token.RefreshToken), + } + + connData, err := json.Marshal(&cd) + if err != nil { + return identity, fmt.Errorf("oidc: failed to encode connector data: %v", err) + } + + identity = connector.Identity{ + UserID: subject, + Username: name, + PreferredUsername: preferredUsername, + Email: email, + EmailVerified: emailVerified, + Groups: groups, + ConnectorData: connData, + } + + if c.userIDKey != "" { + userID, found := claims[c.userIDKey].(string) + if !found { + return identity, fmt.Errorf("oidc: not found %v claim", c.userIDKey) + } + identity.UserID = userID + } + + return identity, nil +} diff --git a/vendor/github.com/dexidp/dex/connector/openshift/BUILD b/vendor/github.com/dexidp/dex/connector/openshift/BUILD new file mode 100644 index 00000000..788ea8eb --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/openshift/BUILD @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "openshift", + srcs = ["openshift.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/openshift", + importpath = "github.com/dexidp/dex/connector/openshift", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/httpclient", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi", + "@org_golang_x_oauth2//:oauth2", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/openshift/openshift.go b/vendor/github.com/dexidp/dex/connector/openshift/openshift.go new file mode 100644 index 00000000..99d1b5b2 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/openshift/openshift.go @@ -0,0 +1,267 @@ +package openshift + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/oauth2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/httpclient" + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage/kubernetes/k8sapi" +) + +const ( + wellKnownURLPath = "/.well-known/oauth-authorization-server" + usersURLPath = "/apis/user.openshift.io/v1/users/~" +) + +// Config holds configuration options for OpenShift login +type Config struct { + Issuer string `json:"issuer"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + RedirectURI string `json:"redirectURI"` + Groups []string `json:"groups"` + InsecureCA bool `json:"insecureCA"` + RootCA string `json:"rootCA"` +} + +var ( + _ connector.CallbackConnector = (*openshiftConnector)(nil) + _ connector.RefreshConnector = (*openshiftConnector)(nil) +) + +type openshiftConnector struct { + apiURL string + redirectURI string + clientID string + clientSecret string + cancel context.CancelFunc + logger log.Logger + httpClient *http.Client + oauth2Config *oauth2.Config + insecureCA bool + rootCA string + groups []string +} + +type user struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + Identities []string `json:"identities" protobuf:"bytes,3,rep,name=identities"` + FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` + 
Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` +} + +// Open returns a connector which can be used to login users through an upstream +// OpenShift OAuth2 provider. +func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { + var rootCAs []string + if c.RootCA != "" { + rootCAs = append(rootCAs, c.RootCA) + } + + httpClient, err := httpclient.NewHTTPClient(rootCAs, c.InsecureCA) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP client: %w", err) + } + + return c.OpenWithHTTPClient(id, logger, httpClient) +} + +// OpenWithHTTPClient returns a connector which can be used to login users through an upstream +// OpenShift OAuth2 provider. It provides the ability to inject a http.Client. +func (c *Config) OpenWithHTTPClient(id string, logger log.Logger, + httpClient *http.Client, +) (conn connector.Connector, err error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wellKnownURL := strings.TrimSuffix(c.Issuer, "/") + wellKnownURLPath + req, err := http.NewRequest(http.MethodGet, wellKnownURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a request to OpenShift endpoint %w", err) + } + + openshiftConnector := openshiftConnector{ + apiURL: c.Issuer, + cancel: cancel, + clientID: c.ClientID, + clientSecret: c.ClientSecret, + insecureCA: c.InsecureCA, + logger: logger, + redirectURI: c.RedirectURI, + rootCA: c.RootCA, + groups: c.Groups, + httpClient: httpClient, + } + + var metadata struct { + Auth string `json:"authorization_endpoint"` + Token string `json:"token_endpoint"` + } + + resp, err := openshiftConnector.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("failed to query OpenShift endpoint %w", err) + } + + defer resp.Body.Close() + + if err := json.NewDecoder(resp.Body).Decode(&metadata); err != nil { + return nil, fmt.Errorf("discovery through endpoint %s failed to decode body: %w", + wellKnownURL, err) + } + + openshiftConnector.oauth2Config = &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + Endpoint: oauth2.Endpoint{ + AuthURL: metadata.Auth, TokenURL: metadata.Token, + }, + Scopes: []string{"user:info"}, + RedirectURL: c.RedirectURI, + } + return &openshiftConnector, nil +} + +func (c *openshiftConnector) Close() error { + c.cancel() + return nil +} + +// LoginURL returns the URL to redirect the user to login with. 
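+// The Scopes argument is accepted for interface compatibility but unused here: OpenShift logins always request the fixed user:info scope configured above.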
+func (c *openshiftConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { + if c.redirectURI != callbackURL { + return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", + callbackURL, c.redirectURI) + } + return c.oauth2Config.AuthCodeURL(state), nil +} + +type oauth2Error struct { + error string + errorDescription string +} + +func (e *oauth2Error) Error() string { + if e.errorDescription == "" { + return e.error + } + return e.error + ": " + e.errorDescription +} + +// HandleCallback parses the request and returns the user's identity +func (c *openshiftConnector) HandleCallback(s connector.Scopes, + r *http.Request, +) (identity connector.Identity, err error) { + q := r.URL.Query() + if errType := q.Get("error"); errType != "" { + return identity, &oauth2Error{errType, q.Get("error_description")} + } + + ctx := r.Context() + if c.httpClient != nil { + ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) + } + + token, err := c.oauth2Config.Exchange(ctx, q.Get("code")) + if err != nil { + return identity, fmt.Errorf("oidc: failed to get token: %v", err) + } + + return c.identity(ctx, s, token) +} + +func (c *openshiftConnector) Refresh(ctx context.Context, s connector.Scopes, + oldID connector.Identity, +) (connector.Identity, error) { + var token oauth2.Token + err := json.Unmarshal(oldID.ConnectorData, &token) + if err != nil { + return connector.Identity{}, fmt.Errorf("parsing token: %w", err) + } + if c.httpClient != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, c.httpClient) + } + return c.identity(ctx, s, &token) +} + +func (c *openshiftConnector) identity(ctx context.Context, s connector.Scopes, + token *oauth2.Token, +) (identity connector.Identity, err error) { + client := c.oauth2Config.Client(ctx, token) + user, err := c.user(ctx, client) + if err != nil { + return identity, fmt.Errorf("openshift: get user: %v", err) + } + + if len(c.groups) > 0 { + validGroups := validateAllowedGroups(user.Groups, c.groups) + + if !validGroups { + return identity, fmt.Errorf("openshift: user %q is not in any of the required groups", user.Name) + } + } + + identity = connector.Identity{ + UserID: user.UID, + Username: user.Name, + PreferredUsername: user.Name, + Email: user.Name, + Groups: user.Groups, + } + + if s.OfflineAccess { + connData, err := json.Marshal(token) + if err != nil { + return identity, fmt.Errorf("marshal connector data: %v", err) + } + identity.ConnectorData = connData + } + + return identity, nil +} + +// user function returns the OpenShift user associated with the authenticated user +func (c *openshiftConnector) user(ctx context.Context, client *http.Client) (u user, err error) { + url := c.apiURL + usersURLPath + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return u, fmt.Errorf("new req: %v", err) + } + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return u, fmt.Errorf("get URL %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return u, fmt.Errorf("read body: %v", err) + } + return u, fmt.Errorf("%s: %s", resp.Status, body) + } + + if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { + return u, fmt.Errorf("JSON decode: %v", err) + } + + return u, err +} + +func validateAllowedGroups(userGroups, allowedGroups []string) bool { + matchingGroups := groups.Filter(userGroups, allowedGroups) + + return len(matchingGroups) != 0 +} diff --git 
a/vendor/github.com/dexidp/dex/connector/saml/BUILD b/vendor/github.com/dexidp/dex/connector/saml/BUILD new file mode 100644 index 00000000..7d312c3a --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/saml/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "saml", + srcs = [ + "saml.go", + "types.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/connector/saml", + importpath = "github.com/dexidp/dex/connector/saml", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/beevik/etree", + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/pkg/groups", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/mattermost/xml-roundtrip-validator", + "//vendor/github.com/pkg/errors", + "//vendor/github.com/russellhaering/goxmldsig", + "//vendor/github.com/russellhaering/goxmldsig/etreeutils", + ], +) diff --git a/vendor/github.com/dexidp/dex/connector/saml/saml.go b/vendor/github.com/dexidp/dex/connector/saml/saml.go new file mode 100644 index 00000000..06712db6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/saml/saml.go @@ -0,0 +1,644 @@ +// Package saml contains login methods for SAML. +package saml + +import ( + "bytes" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "encoding/xml" + "fmt" + "os" + "strings" + "sync" + "time" + + "github.com/beevik/etree" + xrv "github.com/mattermost/xml-roundtrip-validator" + "github.com/pkg/errors" + dsig "github.com/russellhaering/goxmldsig" + "github.com/russellhaering/goxmldsig/etreeutils" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/pkg/groups" + "github.com/dexidp/dex/pkg/log" +) + +//nolint +const ( + bindingRedirect = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" + bindingPOST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" + + nameIDFormatEmailAddress = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" + nameIDFormatUnspecified = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" + nameIDFormatX509Subject = "urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName" + nameIDFormatWindowsDN = "urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName" + nameIDFormatEncrypted = "urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted" + nameIDFormatEntity = "urn:oasis:names:tc:SAML:2.0:nameid-format:entity" + nameIDFormatKerberos = "urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos" + nameIDFormatPersistent = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" + nameIDformatTransient = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" + + // top level status codes + statusCodeSuccess = "urn:oasis:names:tc:SAML:2.0:status:Success" + + // subject confirmation methods + subjectConfirmationMethodBearer = "urn:oasis:names:tc:SAML:2.0:cm:bearer" + + // allowed clock drift for timestamp validation + allowedClockDrift = time.Duration(30) * time.Second +) + +var ( + nameIDFormats = []string{ + nameIDFormatEmailAddress, + nameIDFormatUnspecified, + nameIDFormatX509Subject, + nameIDFormatWindowsDN, + nameIDFormatEncrypted, + nameIDFormatEntity, + nameIDFormatKerberos, + nameIDFormatPersistent, + nameIDformatTransient, + } + nameIDFormatLookup = make(map[string]string) + + lookupOnce sync.Once +) + +// Config represents configuration options for the SAML provider. +type Config struct { + // TODO(ericchiang): A bunch of these fields could be auto-filled if + // we supported SAML metadata discovery. 
+ // + // https://www.oasis-open.org/committees/download.php/35391/sstc-saml-metadata-errata-2.0-wd-04-diff.pdf + + EntityIssuer string `json:"entityIssuer"` + SSOIssuer string `json:"ssoIssuer"` + SSOURL string `json:"ssoURL"` + + // X509 CA file or raw data to verify XML signatures. + CA string `json:"ca"` + CAData []byte `json:"caData"` + + InsecureSkipSignatureValidation bool `json:"insecureSkipSignatureValidation"` + + // Assertion attribute names to look up various claims with. + UsernameAttr string `json:"usernameAttr"` + EmailAttr string `json:"emailAttr"` + GroupsAttr string `json:"groupsAttr"` + // If GroupsDelim is supplied the connector assumes groups are returned as a + // single string instead of multiple attribute values. This delimiter will be + // used to split the groups string. + GroupsDelim string `json:"groupsDelim"` + AllowedGroups []string `json:"allowedGroups"` + FilterGroups bool `json:"filterGroups"` + RedirectURI string `json:"redirectURI"` + + // Requested format of the NameID. The NameID value is mapped to the ID Token + // 'sub' claim. + // + // This can be an abbreviated form of the full URI with just the last component. For + // example, if this value is set to "emailAddress" the format will resolve to: + // + // urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress + // + // If no value is specified, this value defaults to: + // + // urn:oasis:names:tc:SAML:2.0:nameid-format:persistent + // + NameIDPolicyFormat string `json:"nameIDPolicyFormat"` +} + +type certStore struct { + certs []*x509.Certificate +} + +func (c certStore) Certificates() (roots []*x509.Certificate, err error) { + return c.certs, nil +} + +// Open validates the config and returns a connector. It does not actually +// validate connectivity with the provider.
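+// The required fields (ssoURL, usernameAttr, emailAttr, redirectURI) are checked in openConnector below; CA material is only parsed when signature validation is enabled.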
+func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { + return c.openConnector(logger) +} + +func (c *Config) openConnector(logger log.Logger) (*provider, error) { + requiredFields := []struct { + name, val string + }{ + {"ssoURL", c.SSOURL}, + {"usernameAttr", c.UsernameAttr}, + {"emailAttr", c.EmailAttr}, + {"redirectURI", c.RedirectURI}, + } + var missing []string + for _, f := range requiredFields { + if f.val == "" { + missing = append(missing, f.name) + } + } + switch len(missing) { + case 0: + case 1: + return nil, fmt.Errorf("missing required field %q", missing[0]) + default: + return nil, fmt.Errorf("missing required fields %q", missing) + } + + p := &provider{ + entityIssuer: c.EntityIssuer, + ssoIssuer: c.SSOIssuer, + ssoURL: c.SSOURL, + now: time.Now, + usernameAttr: c.UsernameAttr, + emailAttr: c.EmailAttr, + groupsAttr: c.GroupsAttr, + groupsDelim: c.GroupsDelim, + allowedGroups: c.AllowedGroups, + filterGroups: c.FilterGroups, + redirectURI: c.RedirectURI, + logger: logger, + + nameIDPolicyFormat: c.NameIDPolicyFormat, + } + + if p.nameIDPolicyFormat == "" { + p.nameIDPolicyFormat = nameIDFormatPersistent + } else { + lookupOnce.Do(func() { + suffix := func(s, sep string) string { + if i := strings.LastIndex(s, sep); i > 0 { + return s[i+1:] + } + return s + } + for _, format := range nameIDFormats { + nameIDFormatLookup[suffix(format, ":")] = format + nameIDFormatLookup[format] = format + } + }) + + if format, ok := nameIDFormatLookup[p.nameIDPolicyFormat]; ok { + p.nameIDPolicyFormat = format + } else { + return nil, fmt.Errorf("invalid nameIDPolicyFormat: %q", p.nameIDPolicyFormat) + } + } + + if !c.InsecureSkipSignatureValidation { + if (c.CA == "") == (c.CAData == nil) { + return nil, errors.New("must provide either 'ca' or 'caData'") + } + + var caData []byte + if c.CA != "" { + data, err := os.ReadFile(c.CA) + if err != nil { + return nil, fmt.Errorf("read ca file: %v", err) + } + caData = data + } else { + caData = c.CAData + } + + var ( + certs []*x509.Certificate + block *pem.Block + ) + for { + block, caData = pem.Decode(caData) + if block == nil { + caData = bytes.TrimSpace(caData) + if len(caData) > 0 { // if there's some left, we've been given bad caData + return nil, fmt.Errorf("parse cert: trailing data: %q", string(caData)) + } + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parse cert: %v", err) + } + certs = append(certs, cert) + } + if len(certs) == 0 { + return nil, errors.New("no certificates found in ca data") + } + p.validator = dsig.NewDefaultValidationContext(certStore{certs}) + } + return p, nil +} + +type provider struct { + entityIssuer string + ssoIssuer string + ssoURL string + + now func() time.Time + + // If nil, don't do signature validation. + validator *dsig.ValidationContext + + // Attribute mappings + usernameAttr string + emailAttr string + groupsAttr string + groupsDelim string + allowedGroups []string + filterGroups bool + + redirectURI string + + nameIDPolicyFormat string + + logger log.Logger +} + +func (p *provider) POSTData(s connector.Scopes, id string) (action, value string, err error) { + r := &authnRequest{ + ProtocolBinding: bindingPOST, + ID: id, + IssueInstant: xmlTime(p.now()), + Destination: p.ssoURL, + NameIDPolicy: &nameIDPolicy{ + AllowCreate: true, + Format: p.nameIDPolicyFormat, + }, + AssertionConsumerServiceURL: p.redirectURI, + } + if p.entityIssuer != "" { + // Issuer for the request is optional. 
For example, okta always ignores + // this value. + r.Issuer = &issuer{Issuer: p.entityIssuer} + } + + data, err := xml.MarshalIndent(r, "", " ") + if err != nil { + return "", "", fmt.Errorf("marshal authn request: %v", err) + } + + // See: https://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf + // "3.5.4 Message Encoding" + return p.ssoURL, base64.StdEncoding.EncodeToString(data), nil +} + +// HandlePOST interprets a request from a SAML provider attempting to verify a +// user's identity. +// +// The steps taken are: +// +// * Validate XML document does not contain malicious inputs. +// * Verify signature on XML document (or verify sig on assertion elements). +// * Verify various parts of the Assertion element. Conditions, audience, etc. +// * Map the Assertion's attribute elements to user info. +func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo string) (ident connector.Identity, err error) { + rawResp, err := base64.StdEncoding.DecodeString(samlResponse) + if err != nil { + return ident, fmt.Errorf("decode response: %v", err) + } + + byteReader := bytes.NewReader(rawResp) + if xrvErr := xrv.Validate(byteReader); xrvErr != nil { + return ident, errors.Wrap(xrvErr, "validating XML response") + } + + // Root element is allowed to not be signed if the Assertion element is. + rootElementSigned := true + if p.validator != nil { + rawResp, rootElementSigned, err = verifyResponseSig(p.validator, rawResp) + if err != nil { + return ident, fmt.Errorf("verify signature: %v", err) + } + } + + var resp response + if err := xml.Unmarshal(rawResp, &resp); err != nil { + return ident, fmt.Errorf("unmarshal response: %v", err) + } + + // If the root element isn't signed, there's no reason to inspect these + // elements. They're not verified. + if rootElementSigned { + if p.ssoIssuer != "" && resp.Issuer != nil && resp.Issuer.Issuer != p.ssoIssuer { + return ident, fmt.Errorf("expected Issuer value %s, got %s", p.ssoIssuer, resp.Issuer.Issuer) + } + + // Verify InResponseTo value matches the expected ID associated with + // the RelayState. + if resp.InResponseTo != inResponseTo { + return ident, fmt.Errorf("expected InResponseTo value %s, got %s", inResponseTo, resp.InResponseTo) + } + + // Destination is optional. + if resp.Destination != "" && resp.Destination != p.redirectURI { + return ident, fmt.Errorf("expected destination %q got %q", p.redirectURI, resp.Destination) + } + + // Status is a required element. + if resp.Status == nil { + return ident, fmt.Errorf("response did not contain a Status element") + } + + if err = p.validateStatus(resp.Status); err != nil { + return ident, err + } + } + + assertion := resp.Assertion + if assertion == nil { + return ident, fmt.Errorf("response did not contain an assertion") + } + + // Subject is usually optional, but we need it for the user ID, so complain + // if it's not present. + subject := assertion.Subject + if subject == nil { + return ident, fmt.Errorf("response did not contain a subject") + } + + // Validate that the response is to the request we originally sent. + if err = p.validateSubject(subject, inResponseTo); err != nil { + return ident, err + } + + // Conditions element is optional, but must be validated if present. + if assertion.Conditions != nil { + // Validate that dex is the intended audience of this response. 
+ if err = p.validateConditions(assertion.Conditions); err != nil { + return ident, err + } + } + + switch { + case subject.NameID != nil: + if ident.UserID = subject.NameID.Value; ident.UserID == "" { + return ident, fmt.Errorf("element NameID does not contain a value") + } + default: + return ident, fmt.Errorf("subject does not contain an NameID element") + } + + // After verifying the assertion, map data in the attribute statements to + // various user info. + attributes := assertion.AttributeStatement + if attributes == nil { + return ident, fmt.Errorf("response did not contain a AttributeStatement") + } + + // Log the actual attributes we got back from the server. This helps debug + // configuration errors on the server side, where the SAML server doesn't + // send us the correct attributes. + p.logger.Infof("parsed and verified saml response attributes %s", attributes) + + // Grab the email. + if ident.Email, _ = attributes.get(p.emailAttr); ident.Email == "" { + return ident, fmt.Errorf("no attribute with name %q: %s", p.emailAttr, attributes.names()) + } + // TODO(ericchiang): Does SAML have an email_verified equivalent? + ident.EmailVerified = true + + // Grab the username. + if ident.Username, _ = attributes.get(p.usernameAttr); ident.Username == "" { + return ident, fmt.Errorf("no attribute with name %q: %s", p.usernameAttr, attributes.names()) + } + + if len(p.allowedGroups) == 0 && (!s.Groups || p.groupsAttr == "") { + // Groups not requested or not configured. We're done. + return ident, nil + } + + if len(p.allowedGroups) > 0 && (!s.Groups || p.groupsAttr == "") { + // allowedGroups set but no groups or groupsAttr. Disallowing. + return ident, fmt.Errorf("user not a member of allowed groups") + } + + // Grab the groups. + if p.groupsDelim != "" { + groupsStr, ok := attributes.get(p.groupsAttr) + if !ok { + return ident, fmt.Errorf("no attribute with name %q: %s", p.groupsAttr, attributes.names()) + } + // TODO(ericchiang): Do we need to further trim whitespace? + ident.Groups = strings.Split(groupsStr, p.groupsDelim) + } else { + groups, ok := attributes.all(p.groupsAttr) + if !ok { + return ident, fmt.Errorf("no attribute with name %q: %s", p.groupsAttr, attributes.names()) + } + ident.Groups = groups + } + + if len(p.allowedGroups) == 0 { + // No allowed groups set, just return the ident + return ident, nil + } + + // Look for membership in one of the allowed groups + groupMatches := groups.Filter(ident.Groups, p.allowedGroups) + + if len(groupMatches) == 0 { + // No group membership matches found, disallowing + return ident, fmt.Errorf("user not a member of allowed groups") + } + + if p.filterGroups { + ident.Groups = groupMatches + } + + // Otherwise, we're good + return ident, nil +} + +// validateStatus verifies that the response has a good status code or +// formats a human readable error based on the bad status. 
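+// For example, a failing StatusCode of "urn:oasis:names:tc:SAML:2.0:status:Requester" is reported as just "Requester", with the StatusMessage appended when one is present.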
+func (p *provider) validateStatus(status *status) error { + // StatusCode is mandatory in the Status type + statusCode := status.StatusCode + if statusCode == nil { + return fmt.Errorf("response did not contain a StatusCode") + } + + if statusCode.Value != statusCodeSuccess { + parts := strings.Split(statusCode.Value, ":") + lastPart := parts[len(parts)-1] + errorMessage := fmt.Sprintf("status code of the Response was not Success, was %q", lastPart) + statusMessage := status.StatusMessage + if statusMessage != nil && statusMessage.Value != "" { + errorMessage += " -> " + statusMessage.Value + } + return fmt.Errorf(errorMessage) + } + return nil +} + +// validateSubject ensures the response is to the request we expect. +// +// This is described in the spec "Profiles for the OASIS Security +// Assertion Markup Language" in section 3.3 Bearer. +// see https://www.oasis-open.org/committees/download.php/35389/sstc-saml-profiles-errata-2.0-wd-06-diff.pdf +// +// Some of these fields are optional, but we're going to be strict here since +// we have no other way of guaranteeing that this is actually the response to +// the request we expect. +func (p *provider) validateSubject(subject *subject, inResponseTo string) error { + // Optional according to the spec, but again, we're going to be strict here. + if len(subject.SubjectConfirmations) == 0 { + return fmt.Errorf("subject contained no SubjectConfirmations") + } + + errs := make([]error, 0, len(subject.SubjectConfirmations)) + // One of these must match our assumptions, not all. + for _, c := range subject.SubjectConfirmations { + err := func() error { + if c.Method != subjectConfirmationMethodBearer { + return fmt.Errorf("unexpected subject confirmation method: %v", c.Method) + } + + data := c.SubjectConfirmationData + if data == nil { + return fmt.Errorf("no SubjectConfirmationData field found in SubjectConfirmation") + } + if data.InResponseTo != inResponseTo { + return fmt.Errorf("expected SubjectConfirmationData InResponseTo value %q, got %q", inResponseTo, data.InResponseTo) + } + + notBefore := time.Time(data.NotBefore) + notOnOrAfter := time.Time(data.NotOnOrAfter) + now := p.now() + if !notBefore.IsZero() && before(now, notBefore) { + return fmt.Errorf("at %s got response that cannot be processed before %s", now, notBefore) + } + if !notOnOrAfter.IsZero() && after(now, notOnOrAfter) { + return fmt.Errorf("at %s got response that cannot be processed because it expired at %s", now, notOnOrAfter) + } + if r := data.Recipient; r != "" && r != p.redirectURI { + return fmt.Errorf("expected Recipient %q got %q", p.redirectURI, r) + } + return nil + }() + if err == nil { + // Subject is valid. + return nil + } + errs = append(errs, err) + } + + if len(errs) == 1 { + return fmt.Errorf("failed to validate subject confirmation: %v", errs[0]) + } + return fmt.Errorf("failed to validate subject confirmation: %v", errs) +} + +// validateConditions ensures that dex is the intended audience +// for the request, and not another service provider. +// +// See: https://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf +// "2.3.3 Element <Conditions>" +func (p *provider) validateConditions(conditions *conditions) error { + // Ensure the conditions haven't expired.
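Before the conditions checks continue below, it is worth isolating the pattern validateSubject uses above: try every SubjectConfirmation, succeed on the first valid one, and only surface the accumulated errors when none match. A generic sketch of that shape (firstValid is a made-up name, not part of the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

// firstValid generalizes the loop above: check each candidate, stop at the
// first success, and report the collected errors only if all candidates fail.
func firstValid[T any](candidates []T, check func(T) error) error {
	errs := make([]error, 0, len(candidates))
	for _, c := range candidates {
		err := check(c)
		if err == nil {
			return nil // one valid confirmation is enough
		}
		errs = append(errs, err)
	}
	if len(errs) == 1 {
		return fmt.Errorf("failed to validate: %v", errs[0])
	}
	return fmt.Errorf("failed to validate: %v", errs)
}

func main() {
	err := firstValid([]string{"holder-of-key", "bearer"}, func(m string) error {
		if m != "bearer" {
			return errors.New("unexpected confirmation method " + m)
		}
		return nil
	})
	fmt.Println(err) // <nil>: the second candidate matched
}
```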
+ now := p.now() + notBefore := time.Time(conditions.NotBefore) + if !notBefore.IsZero() && before(now, notBefore) { + return fmt.Errorf("at %s got response that cannot be processed before %s", now, notBefore) + } + + notOnOrAfter := time.Time(conditions.NotOnOrAfter) + if !notOnOrAfter.IsZero() && after(now, notOnOrAfter) { + return fmt.Errorf("at %s got response that cannot be processed because it expired at %s", now, notOnOrAfter) + } + + // Sometimes, dex's issuer string can be different than the redirect URI, + // but if dex's issuer isn't explicitly provided assume the redirect URI. + expAud := p.entityIssuer + if expAud == "" { + expAud = p.redirectURI + } + + // AudienceRestriction elements indicate the intended audience(s) of an + // assertion. If dex isn't in these audiences, reject the assertion. + // + // Note that if there are multiple AudienceRestriction elements, each must + // individually contain dex in their audience list. + for _, r := range conditions.AudienceRestriction { + values := make([]string, len(r.Audiences)) + issuerInAudiences := false + for i, aud := range r.Audiences { + if aud.Value == expAud { + issuerInAudiences = true + break + } + values[i] = aud.Value + } + + if !issuerInAudiences { + return fmt.Errorf("required audience %s was not in Response audiences %s", expAud, values) + } + } + return nil +} + +// verifyResponseSig attempts to verify the signature of a SAML response or +// the assertion. +// +// If the root element is properly signed, this method returns it. +// +// The SAML spec requires supporting responses where the root element is +// unverified, but the sub elements are signed. In these cases, +// this method returns rootVerified=false to indicate that the +// <Assertion> elements should be trusted, but all other elements MUST be ignored. +// +// Note: we still don't support multiple <Assertion> tags. If there are +// multiple <Assertion> elements present this code will only process the first. +func verifyResponseSig(validator *dsig.ValidationContext, data []byte) (signed []byte, rootVerified bool, err error) { + doc := etree.NewDocument() + if err = doc.ReadFromBytes(data); err != nil { + return nil, false, fmt.Errorf("parse document: %v", err) + } + + response := doc.Root() + transformedResponse, err := validator.Validate(response) + if err == nil { + // Root element is verified, return it. + doc.SetRoot(transformedResponse) + signed, err = doc.WriteToBytes() + return signed, true, err + } + + // Ensures xmlns are copied down to the assertion element when they are defined in the root + // + // TODO: Only select from child elements of the root. + assertion, err := etreeutils.NSSelectOne(response, "urn:oasis:names:tc:SAML:2.0:assertion", "Assertion") + if err != nil { + return nil, false, fmt.Errorf("response does not contain an Assertion element") + } + transformedAssertion, err := validator.Validate(assertion) + if err != nil { + return nil, false, fmt.Errorf("response does not contain a valid signature element: %v", err) + } + + // Verified an assertion but not the response. Can't trust any child elements, + // except the assertion. Remove them all. + for _, el := range response.ChildElements() { + response.RemoveChild(el) + } + + // We still return the full <Response> element, even though it's unverified + // because the <Assertion> element is not a valid XML document on its own. + // It still requires the root element to define things like namespaces.
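The re-attach that completes this pruning step follows immediately below. As a self-contained illustration of the same etree manipulation (using only etree calls that also appear in this hunk; the sample XML is invented):

```go
package main

import (
	"fmt"

	"github.com/beevik/etree"
)

func main() {
	// A stand-in for an IdP response whose root is unsigned but whose
	// Assertion child has been independently verified.
	doc := etree.NewDocument()
	err := doc.ReadFromString(`<Response xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"><Status/><saml:Assertion ID="a1"/><Other/></Response>`)
	if err != nil {
		panic(err)
	}
	root := doc.Root()

	// Locate the verified child. The real code gets this element back from
	// the signature validator rather than selecting it by tag name.
	var assertion *etree.Element
	for _, el := range root.ChildElements() {
		if el.Tag == "Assertion" {
			assertion = el
		}
	}

	// Drop every unverified sibling, then re-attach only the assertion:
	// the root is kept solely so its namespace declarations stay in scope.
	for _, el := range root.ChildElements() {
		root.RemoveChild(el)
	}
	root.AddChild(assertion)

	out, _ := doc.WriteToString()
	fmt.Println(out) // <Response ...><saml:Assertion ID="a1"/></Response>
}
```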
+ response.AddChild(transformedAssertion) + signed, err = doc.WriteToBytes() + return signed, false, err +} + +// before determines if a given time is before the current time, with an +// allowed clock drift. +func before(now, notBefore time.Time) bool { + return now.Add(allowedClockDrift).Before(notBefore) +} + +// after determines if a given time is after the current time, with an +// allowed clock drift. +func after(now, notOnOrAfter time.Time) bool { + return now.After(notOnOrAfter.Add(allowedClockDrift)) +} diff --git a/vendor/github.com/dexidp/dex/connector/saml/types.go b/vendor/github.com/dexidp/dex/connector/saml/types.go new file mode 100644 index 00000000..c8d7e7f3 --- /dev/null +++ b/vendor/github.com/dexidp/dex/connector/saml/types.go @@ -0,0 +1,277 @@ +package saml + +import ( + "bytes" + "encoding/xml" + "fmt" + "time" +) + +const timeFormat = "2006-01-02T15:04:05Z" + +type xmlTime time.Time + +func (t xmlTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { + return xml.Attr{ + Name: name, + Value: time.Time(t).UTC().Format(timeFormat), + }, nil +} + +func (t *xmlTime) UnmarshalXMLAttr(attr xml.Attr) error { + got, err := time.Parse(timeFormat, attr.Value) + if err != nil { + return err + } + *t = xmlTime(got) + return nil +} + +type samlVersion struct{} + +func (s samlVersion) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { + return xml.Attr{ + Name: name, + Value: "2.0", + }, nil +} + +func (s *samlVersion) UnmarshalXMLAttr(attr xml.Attr) error { + if attr.Value != "2.0" { + return fmt.Errorf(`saml version expected "2.0" got %q`, attr.Value) + } + return nil +} + +type authnRequest struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnRequest"` + + ID string `xml:"ID,attr"` + Version samlVersion `xml:"Version,attr"` + + ProviderName string `xml:"ProviderName,attr,omitempty"` + IssueInstant xmlTime `xml:"IssueInstant,attr,omitempty"` + Consent bool `xml:"Consent,attr,omitempty"` + Destination string `xml:"Destination,attr,omitempty"` + + ForceAuthn bool `xml:"ForceAuthn,attr,omitempty"` + IsPassive bool `xml:"IsPassive,attr,omitempty"` + ProtocolBinding string `xml:"ProtocolBinding,attr,omitempty"` + + AssertionConsumerServiceURL string `xml:"AssertionConsumerServiceURL,attr,omitempty"` + + Subject *subject `xml:"Subject,omitempty"` + Issuer *issuer `xml:"Issuer,omitempty"` + NameIDPolicy *nameIDPolicy `xml:"NameIDPolicy,omitempty"` + + // TODO(ericchiang): Make this configurable and determine appropriate default values. + RequestAuthnContext *requestAuthnContext `xml:"RequestAuthnContext,omitempty"` +} + +type subject struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Subject"` + + NameID *nameID `xml:"NameID,omitempty"` + SubjectConfirmations []subjectConfirmation `xml:"SubjectConfirmation"` + + // TODO(ericchiang): Do we need to deal with baseID? 
+} + +type nameID struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion NameID"` + + Format string `xml:"Format,omitempty"` + Value string `xml:",chardata"` +} + +type subjectConfirmationData struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion SubjectConfirmationData"` + + NotBefore xmlTime `xml:"NotBefore,attr,omitempty"` + NotOnOrAfter xmlTime `xml:"NotOnOrAfter,attr,omitempty"` + Recipient string `xml:"Recipient,attr,omitempty"` + InResponseTo string `xml:"InResponseTo,attr,omitempty"` +} + +type subjectConfirmation struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion SubjectConfirmation"` + + Method string `xml:"Method,attr,omitempty"` + SubjectConfirmationData *subjectConfirmationData `xml:"SubjectConfirmationData,omitempty"` +} + +type audience struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Audience"` + Value string `xml:",chardata"` +} + +type audienceRestriction struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion AudienceRestriction"` + + Audiences []audience `xml:"Audience"` +} + +type conditions struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Conditions"` + + NotBefore xmlTime `xml:"NotBefore,attr,omitempty"` + NotOnOrAfter xmlTime `xml:"NotOnOrAfter,attr,omitempty"` + + AudienceRestriction []audienceRestriction `xml:"AudienceRestriction,omitempty"` +} + +type statusCode struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol StatusCode"` + + Value string `xml:"Value,attr,omitempty"` +} + +type statusMessage struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol StatusMessage"` + + Value string `xml:",chardata"` +} + +type status struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Status"` + + StatusCode *statusCode `xml:"StatusCode"` + StatusMessage *statusMessage `xml:"StatusMessage,omitempty"` +} + +type issuer struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"` + Issuer string `xml:",chardata"` +} + +type nameIDPolicy struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol NameIDPolicy"` + AllowCreate bool `xml:"AllowCreate,attr,omitempty"` + Format string `xml:"Format,attr,omitempty"` +} + +type requestAuthnContext struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol RequestAuthnContext"` + + AuthnContextClassRefs []authnContextClassRef +} + +type authnContextClassRef struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnContextClassRef"` + Value string `xml:",chardata"` +} + +type response struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Response"` + + ID string `xml:"ID,attr"` + InResponseTo string `xml:"InResponseTo,attr"` + Version samlVersion `xml:"Version,attr"` + + Destination string `xml:"Destination,attr,omitempty"` + + Issuer *issuer `xml:"Issuer,omitempty"` + + Status *status `xml:"Status"` + + // TODO(ericchiang): How to deal with multiple assertions?
+ Assertion *assertion `xml:"Assertion,omitempty"` +} + +type assertion struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Assertion"` + + Version samlVersion `xml:"Version,attr"` + ID string `xml:"ID,attr"` + IssueInstance xmlTime `xml:"IssueInstance,attr"` + + Issuer issuer `xml:"Issuer"` + + Subject *subject `xml:"Subject,omitempty"` + + Conditions *conditions `xml:"Conditions"` + + AttributeStatement *attributeStatement `xml:"AttributeStatement,omitempty"` +} + +type attributeStatement struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion AttributeStatement"` + + Attributes []attribute `xml:"Attribute"` +} + +func (a *attributeStatement) get(name string) (s string, ok bool) { + for _, attr := range a.Attributes { + if attr.Name == name { + ok = true + if len(attr.AttributeValues) > 0 { + return attr.AttributeValues[0].Value, true + } + } + } + return +} + +func (a *attributeStatement) all(name string) (s []string, ok bool) { + for _, attr := range a.Attributes { + if attr.Name == name { + ok = true + for _, val := range attr.AttributeValues { + s = append(s, val.Value) + } + } + } + return +} + +// names lists the names of all attributes in the attribute statement. +func (a *attributeStatement) names() []string { + s := make([]string, len(a.Attributes)) + + for i, attr := range a.Attributes { + s[i] = attr.Name + } + return s +} + +// String is a formatter for logging an attribute statement's sub-statements. +func (a *attributeStatement) String() string { + buff := new(bytes.Buffer) + for i, attr := range a.Attributes { + if i != 0 { + buff.WriteString(", ") + } + buff.WriteString(attr.String()) + } + return buff.String() +} + +type attribute struct { + XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Attribute"` + + Name string `xml:"Name,attr"` + + NameFormat string `xml:"NameFormat,attr,omitempty"` + FriendlyName string `xml:"FriendlyName,attr,omitempty"` + + AttributeValues []attributeValue `xml:"AttributeValue,omitempty"` +} + +type attributeValue struct { + XMLName xml.Name `xml:"AttributeValue"` + Value string `xml:",chardata"` +} + +func (a attribute) String() string { + if len(a.AttributeValues) == 1 { + // "email" = "jane.doe@coreos.com" + return fmt.Sprintf("%q = %q", a.Name, a.AttributeValues[0].Value) + } + values := make([]string, len(a.AttributeValues)) + for i, av := range a.AttributeValues { + values[i] = av.Value + } + + // "groups" = ["engineering", "docs"] + return fmt.Sprintf("%q = %q", a.Name, values) +} diff --git a/vendor/github.com/dexidp/dex/pkg/groups/BUILD b/vendor/github.com/dexidp/dex/pkg/groups/BUILD new file mode 100644 index 00000000..b7b9eb5e --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/groups/BUILD @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "groups", + srcs = ["groups.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/pkg/groups", + importpath = "github.com/dexidp/dex/pkg/groups", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dexidp/dex/pkg/groups/groups.go b/vendor/github.com/dexidp/dex/pkg/groups/groups.go new file mode 100644 index 00000000..5dde65ab --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/groups/groups.go @@ -0,0 +1,18 @@ +// Package groups contains helper functions related to groups +package groups + +// Filter filters out any groups in given that are not in required, so it may +// happen that the resulting slice is empty.
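Filter, whose doc comment sits just above, follows. The attribute helpers shown here differ in a way the group logic in HandlePOST relies on: get returns the first value of the first matching attribute, while all concatenates values across every matching attribute. A simplified stand-in (types invented for illustration, not the vendored unexported ones):

```go
package main

import "fmt"

type attr struct {
	Name   string
	Values []string
}

// get returns the first value of the first matching attribute that has one.
func get(attrs []attr, name string) (string, bool) {
	for _, a := range attrs {
		if a.Name == name && len(a.Values) > 0 {
			return a.Values[0], true
		}
	}
	return "", false
}

// all concatenates values across every attribute with a matching name.
func all(attrs []attr, name string) ([]string, bool) {
	var out []string
	ok := false
	for _, a := range attrs {
		if a.Name == name {
			ok = true
			out = append(out, a.Values...)
		}
	}
	return out, ok
}

func main() {
	attrs := []attr{
		{Name: "email", Values: []string{"jane.doe@example.com"}},
		{Name: "groups", Values: []string{"engineering", "docs"}},
		{Name: "groups", Values: []string{"oncall"}},
	}
	email, _ := get(attrs, "email")
	groups, _ := all(attrs, "groups")
	fmt.Println(email, groups) // jane.doe@example.com [engineering docs oncall]
}
```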
+func Filter(given, required []string) []string { + groups := []string{} + groupFilter := make(map[string]struct{}) + for _, group := range required { + groupFilter[group] = struct{}{} + } + for _, group := range given { + if _, ok := groupFilter[group]; ok { + groups = append(groups, group) + } + } + return groups +} diff --git a/vendor/github.com/dexidp/dex/pkg/httpclient/BUILD b/vendor/github.com/dexidp/dex/pkg/httpclient/BUILD new file mode 100644 index 00000000..4b3c7003 --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/httpclient/BUILD @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "httpclient", + srcs = ["httpclient.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/pkg/httpclient", + importpath = "github.com/dexidp/dex/pkg/httpclient", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dexidp/dex/pkg/httpclient/httpclient.go b/vendor/github.com/dexidp/dex/pkg/httpclient/httpclient.go new file mode 100644 index 00000000..04837a7d --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/httpclient/httpclient.go @@ -0,0 +1,45 @@ +package httpclient + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "time" +) + +func NewHTTPClient(rootCAs []string, insecureSkipVerify bool) (*http.Client, error) { + pool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + tlsConfig := tls.Config{RootCAs: pool, InsecureSkipVerify: insecureSkipVerify} + for _, rootCA := range rootCAs { + rootCABytes, err := os.ReadFile(rootCA) + if err != nil { + return nil, fmt.Errorf("failed to read root-ca: %v", err) + } + if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCABytes) { + return nil, fmt.Errorf("no certs found in root CA file %q", rootCA) + } + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tlsConfig, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + }, nil +} diff --git a/vendor/github.com/dexidp/dex/pkg/httpclient/readme.md b/vendor/github.com/dexidp/dex/pkg/httpclient/readme.md new file mode 100644 index 00000000..cc262522 --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/httpclient/readme.md @@ -0,0 +1,44 @@ +# Regenerate testdata + +### server.csr.cnf + +``` +[req] +default_bits = 2048 +prompt = no +default_md = sha256 +distinguished_name = dn + +[dn] +C=US +ST=RandomState +L=RandomCity +O=RandomOrganization +OU=RandomOrganizationUnit +emailAddress=hello@example.com +CN = localhost +``` + +and + +### v3.ext +``` +authorityKeyIdentifier=keyid,issuer +basicConstraints=CA:FALSE +keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost +IP.1 = 127.0.0.1 +``` + +### Then enter the following commands: + +`openssl genrsa -out rootCA.key 2048` + +`openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 3650 -out rootCA.pem -config server.csr.cnf` + +`openssl req -new -sha256 -nodes -out server.csr -newkey rsa:2048 -keyout server.key -config server.csr.cnf` + +`openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 3650 -sha256 -extfile v3.ext` diff --git a/vendor/github.com/dexidp/dex/pkg/log/BUILD b/vendor/github.com/dexidp/dex/pkg/log/BUILD new file 
mode 100644 index 00000000..940ef6a6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/log/BUILD @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "log", + srcs = [ + "deprecated.go", + "logger.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/pkg/log", + importpath = "github.com/dexidp/dex/pkg/log", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dexidp/dex/pkg/log/deprecated.go b/vendor/github.com/dexidp/dex/pkg/log/deprecated.go new file mode 100644 index 00000000..f20e8b4c --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/log/deprecated.go @@ -0,0 +1,5 @@ +package log + +func Deprecated(logger Logger, f string, args ...interface{}) { + logger.Warnf("Deprecated: "+f, args...) +} diff --git a/vendor/github.com/dexidp/dex/pkg/log/logger.go b/vendor/github.com/dexidp/dex/pkg/log/logger.go new file mode 100644 index 00000000..4f3cdd38 --- /dev/null +++ b/vendor/github.com/dexidp/dex/pkg/log/logger.go @@ -0,0 +1,18 @@ +// Package log provides a logger interface for logger libraries +// so that dex does not depend on any of them directly. +// It also includes a default implementation using Logrus (used by dex previously). +package log + +// Logger serves as an adapter interface for logger libraries +// so that dex does not depend on any of them directly. +type Logger interface { + Debug(args ...interface{}) + Info(args ...interface{}) + Warn(args ...interface{}) + Error(args ...interface{}) + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) +} diff --git a/vendor/github.com/dexidp/dex/server/BUILD b/vendor/github.com/dexidp/dex/server/BUILD new file mode 100644 index 00000000..be1aeac8 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/BUILD @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "server", + srcs = [ + "api.go", + "deviceflowhandlers.go", + "doc.go", + "handlers.go", + "oauth2.go", + "refreshhandlers.go", + "rotation.go", + "server.go", + "templates.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/server", + importpath = "github.com/dexidp/dex/server", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/AppsFlyer/go-sundheit", + "//vendor/github.com/Masterminds/sprig/v3:sprig", + "//vendor/github.com/coreos/go-oidc/v3/oidc", + "//vendor/github.com/dexidp/dex/api/v2:api", + "//vendor/github.com/dexidp/dex/connector", + "//vendor/github.com/dexidp/dex/connector/atlassiancrowd", + "//vendor/github.com/dexidp/dex/connector/authproxy", + "//vendor/github.com/dexidp/dex/connector/bitbucketcloud", + "//vendor/github.com/dexidp/dex/connector/gitea", + "//vendor/github.com/dexidp/dex/connector/github", + "//vendor/github.com/dexidp/dex/connector/gitlab", + "//vendor/github.com/dexidp/dex/connector/google", + "//vendor/github.com/dexidp/dex/connector/keystone", + "//vendor/github.com/dexidp/dex/connector/ldap", + "//vendor/github.com/dexidp/dex/connector/linkedin", + "//vendor/github.com/dexidp/dex/connector/microsoft", + "//vendor/github.com/dexidp/dex/connector/mock", + "//vendor/github.com/dexidp/dex/connector/oauth", + "//vendor/github.com/dexidp/dex/connector/oidc", + "//vendor/github.com/dexidp/dex/connector/openshift", + "//vendor/github.com/dexidp/dex/connector/saml", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/server/internal", 
+ "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/dexidp/dex/web", + "//vendor/github.com/felixge/httpsnoop", + "//vendor/github.com/gorilla/handlers", + "//vendor/github.com/gorilla/mux", + "//vendor/github.com/prometheus/client_golang/prometheus", + "//vendor/golang.org/x/crypto/bcrypt", + "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2", + "@org_golang_x_net//html", + ], +) diff --git a/vendor/github.com/dexidp/dex/server/api.go b/vendor/github.com/dexidp/dex/server/api.go new file mode 100644 index 00000000..d8ca1831 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/api.go @@ -0,0 +1,387 @@ +package server + +import ( + "context" + "errors" + "fmt" + + "golang.org/x/crypto/bcrypt" + + "github.com/dexidp/dex/api/v2" + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/server/internal" + "github.com/dexidp/dex/storage" +) + +// apiVersion increases every time a new call is added to the API. Clients should use this info +// to determine if the server supports specific features. +const apiVersion = 2 + +const ( + // recCost is the recommended bcrypt cost, which balances hash strength and + // efficiency. + recCost = 12 + + // upBoundCost is a sane upper bound on bcrypt cost determined by benchmarking: + // high enough to ensure secure encryption, low enough to not put unnecessary + // load on a dex server. + upBoundCost = 16 +) + +// NewAPI returns a server which implements the gRPC API interface. +func NewAPI(s storage.Storage, logger log.Logger, version string) api.DexServer { + return dexAPI{ + s: s, + logger: logger, + version: version, + } +} + +type dexAPI struct { + api.UnimplementedDexServer + + s storage.Storage + logger log.Logger + version string +} + +func (d dexAPI) GetClient(ctx context.Context, req *api.GetClientReq) (*api.GetClientResp, error) { + c, err := d.s.GetClient(req.Id) + if err != nil { + return nil, err + } + + return &api.GetClientResp{ + Client: &api.Client{ + Id: c.ID, + Name: c.Name, + Secret: c.Secret, + RedirectUris: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + LogoUrl: c.LogoURL, + }, + }, nil +} + +func (d dexAPI) CreateClient(ctx context.Context, req *api.CreateClientReq) (*api.CreateClientResp, error) { + if req.Client == nil { + return nil, errors.New("no client supplied") + } + + if req.Client.Id == "" { + req.Client.Id = storage.NewID() + } + if req.Client.Secret == "" && !req.Client.Public { + req.Client.Secret = storage.NewID() + storage.NewID() + } + + c := storage.Client{ + ID: req.Client.Id, + Secret: req.Client.Secret, + RedirectURIs: req.Client.RedirectUris, + TrustedPeers: req.Client.TrustedPeers, + Public: req.Client.Public, + Name: req.Client.Name, + LogoURL: req.Client.LogoUrl, + } + if err := d.s.CreateClient(c); err != nil { + if err == storage.ErrAlreadyExists { + return &api.CreateClientResp{AlreadyExists: true}, nil + } + d.logger.Errorf("api: failed to create client: %v", err) + return nil, fmt.Errorf("create client: %v", err) + } + + return &api.CreateClientResp{ + Client: req.Client, + }, nil +} + +func (d dexAPI) UpdateClient(ctx context.Context, req *api.UpdateClientReq) (*api.UpdateClientResp, error) { + if req.Id == "" { + return nil, errors.New("update client: no client ID supplied") + } + + err := d.s.UpdateClient(req.Id, func(old storage.Client) (storage.Client, error) { + if req.RedirectUris != nil { + old.RedirectURIs = req.RedirectUris + } + if req.TrustedPeers != nil { + old.TrustedPeers = req.TrustedPeers + } + if req.Name != "" { + old.Name = req.Name + } + if 
req.LogoUrl != "" { + old.LogoURL = req.LogoUrl + } + return old, nil + }) + if err != nil { + if err == storage.ErrNotFound { + return &api.UpdateClientResp{NotFound: true}, nil + } + d.logger.Errorf("api: failed to update the client: %v", err) + return nil, fmt.Errorf("update client: %v", err) + } + return &api.UpdateClientResp{}, nil +} + +func (d dexAPI) DeleteClient(ctx context.Context, req *api.DeleteClientReq) (*api.DeleteClientResp, error) { + err := d.s.DeleteClient(req.Id) + if err != nil { + if err == storage.ErrNotFound { + return &api.DeleteClientResp{NotFound: true}, nil + } + d.logger.Errorf("api: failed to delete client: %v", err) + return nil, fmt.Errorf("delete client: %v", err) + } + return &api.DeleteClientResp{}, nil +} + +// checkCost returns an error if the hash provided does not meet lower or upper +// bound cost requirements. +func checkCost(hash []byte) error { + actual, err := bcrypt.Cost(hash) + if err != nil { + return fmt.Errorf("parsing bcrypt hash: %v", err) + } + if actual < bcrypt.DefaultCost { + return fmt.Errorf("given hash cost = %d does not meet minimum cost requirement = %d", actual, bcrypt.DefaultCost) + } + if actual > upBoundCost { + return fmt.Errorf("given hash cost = %d is above upper bound cost = %d, recommended cost = %d", actual, upBoundCost, recCost) + } + return nil +} + +func (d dexAPI) CreatePassword(ctx context.Context, req *api.CreatePasswordReq) (*api.CreatePasswordResp, error) { + if req.Password == nil { + return nil, errors.New("no password supplied") + } + if req.Password.UserId == "" { + return nil, errors.New("no user ID supplied") + } + if req.Password.Hash != nil { + if err := checkCost(req.Password.Hash); err != nil { + return nil, err + } + } else { + return nil, errors.New("no hash of password supplied") + } + + p := storage.Password{ + Email: req.Password.Email, + Hash: req.Password.Hash, + Username: req.Password.Username, + UserID: req.Password.UserId, + } + if err := d.s.CreatePassword(p); err != nil { + if err == storage.ErrAlreadyExists { + return &api.CreatePasswordResp{AlreadyExists: true}, nil + } + d.logger.Errorf("api: failed to create password: %v", err) + return nil, fmt.Errorf("create password: %v", err) + } + + return &api.CreatePasswordResp{}, nil +} + +func (d dexAPI) UpdatePassword(ctx context.Context, req *api.UpdatePasswordReq) (*api.UpdatePasswordResp, error) { + if req.Email == "" { + return nil, errors.New("no email supplied") + } + if req.NewHash == nil && req.NewUsername == "" { + return nil, errors.New("nothing to update") + } + + if req.NewHash != nil { + if err := checkCost(req.NewHash); err != nil { + return nil, err + } + } + + updater := func(old storage.Password) (storage.Password, error) { + if req.NewHash != nil { + old.Hash = req.NewHash + } + + if req.NewUsername != "" { + old.Username = req.NewUsername + } + + return old, nil + } + + if err := d.s.UpdatePassword(req.Email, updater); err != nil { + if err == storage.ErrNotFound { + return &api.UpdatePasswordResp{NotFound: true}, nil + } + d.logger.Errorf("api: failed to update password: %v", err) + return nil, fmt.Errorf("update password: %v", err) + } + + return &api.UpdatePasswordResp{}, nil +} + +func (d dexAPI) DeletePassword(ctx context.Context, req *api.DeletePasswordReq) (*api.DeletePasswordResp, error) { + if req.Email == "" { + return nil, errors.New("no email supplied") + } + + err := d.s.DeletePassword(req.Email) + if err != nil { + if err == storage.ErrNotFound { + return &api.DeletePasswordResp{NotFound: true}, nil + } + 
d.logger.Errorf("api: failed to delete password: %v", err) + return nil, fmt.Errorf("delete password: %v", err) + } + return &api.DeletePasswordResp{}, nil +} + +func (d dexAPI) GetVersion(ctx context.Context, req *api.VersionReq) (*api.VersionResp, error) { + return &api.VersionResp{ + Server: d.version, + Api: apiVersion, + }, nil +} + +func (d dexAPI) ListPasswords(ctx context.Context, req *api.ListPasswordReq) (*api.ListPasswordResp, error) { + passwordList, err := d.s.ListPasswords() + if err != nil { + d.logger.Errorf("api: failed to list passwords: %v", err) + return nil, fmt.Errorf("list passwords: %v", err) + } + + passwords := make([]*api.Password, 0, len(passwordList)) + for _, password := range passwordList { + p := api.Password{ + Email: password.Email, + Username: password.Username, + UserId: password.UserID, + } + passwords = append(passwords, &p) + } + + return &api.ListPasswordResp{ + Passwords: passwords, + }, nil +} + +func (d dexAPI) VerifyPassword(ctx context.Context, req *api.VerifyPasswordReq) (*api.VerifyPasswordResp, error) { + if req.Email == "" { + return nil, errors.New("no email supplied") + } + + if req.Password == "" { + return nil, errors.New("no password to verify supplied") + } + + password, err := d.s.GetPassword(req.Email) + if err != nil { + if err == storage.ErrNotFound { + return &api.VerifyPasswordResp{ + NotFound: true, + }, nil + } + d.logger.Errorf("api: there was an error retrieving the password: %v", err) + return nil, fmt.Errorf("verify password: %v", err) + } + + if err := bcrypt.CompareHashAndPassword(password.Hash, []byte(req.Password)); err != nil { + d.logger.Infof("api: password check failed: %v", err) + return &api.VerifyPasswordResp{ + Verified: false, + }, nil + } + return &api.VerifyPasswordResp{ + Verified: true, + }, nil +} + +func (d dexAPI) ListRefresh(ctx context.Context, req *api.ListRefreshReq) (*api.ListRefreshResp, error) { + id := new(internal.IDTokenSubject) + if err := internal.Unmarshal(req.UserId, id); err != nil { + d.logger.Errorf("api: failed to unmarshal ID Token subject: %v", err) + return nil, err + } + + offlineSessions, err := d.s.GetOfflineSessions(id.UserId, id.ConnId) + if err != nil { + if err == storage.ErrNotFound { + // This means that this user-client pair does not have a refresh token yet. + // An empty list should be returned instead of an error. 
+ return &api.ListRefreshResp{}, nil + } + d.logger.Errorf("api: failed to list refresh tokens: %v", err) + return nil, err + } + + refreshTokenRefs := make([]*api.RefreshTokenRef, 0, len(offlineSessions.Refresh)) + for _, session := range offlineSessions.Refresh { + r := api.RefreshTokenRef{ + Id: session.ID, + ClientId: session.ClientID, + CreatedAt: session.CreatedAt.Unix(), + LastUsed: session.LastUsed.Unix(), + } + refreshTokenRefs = append(refreshTokenRefs, &r) + } + + return &api.ListRefreshResp{ + RefreshTokens: refreshTokenRefs, + }, nil +} + +func (d dexAPI) RevokeRefresh(ctx context.Context, req *api.RevokeRefreshReq) (*api.RevokeRefreshResp, error) { + id := new(internal.IDTokenSubject) + if err := internal.Unmarshal(req.UserId, id); err != nil { + d.logger.Errorf("api: failed to unmarshal ID Token subject: %v", err) + return nil, err + } + + var ( + refreshID string + notFound bool + ) + updater := func(old storage.OfflineSessions) (storage.OfflineSessions, error) { + refreshRef := old.Refresh[req.ClientId] + if refreshRef == nil || refreshRef.ID == "" { + d.logger.Errorf("api: refresh token issued to client %q for user %q not found for deletion", req.ClientId, id.UserId) + notFound = true + return old, storage.ErrNotFound + } + + refreshID = refreshRef.ID + + // Remove entry from Refresh list of the OfflineSession object. + delete(old.Refresh, req.ClientId) + + return old, nil + } + + if err := d.s.UpdateOfflineSessions(id.UserId, id.ConnId, updater); err != nil { + if err == storage.ErrNotFound { + return &api.RevokeRefreshResp{NotFound: true}, nil + } + d.logger.Errorf("api: failed to update offline session object: %v", err) + return nil, err + } + + if notFound { + return &api.RevokeRefreshResp{NotFound: true}, nil + } + + // Delete the refresh token from the storage + // + // TODO(ericchiang): we don't have any good recourse if this call fails. + // Consider garbage collection of refresh tokens with no associated ref. + if err := d.s.DeleteRefresh(refreshID); err != nil { + d.logger.Errorf("failed to delete refresh token: %v", err) + return nil, err + } + + return &api.RevokeRefreshResp{}, nil +} diff --git a/vendor/github.com/dexidp/dex/server/deviceflowhandlers.go b/vendor/github.com/dexidp/dex/server/deviceflowhandlers.go new file mode 100644 index 00000000..95fed3b3 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/deviceflowhandlers.go @@ -0,0 +1,444 @@ +package server + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "golang.org/x/net/html" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" +) + +type deviceCodeResponse struct { + // The unique device code for device authentication + DeviceCode string `json:"device_code"` + // The code the user will exchange via a browser and log in + UserCode string `json:"user_code"` + // The URL to verify the user code.
+ VerificationURI string `json:"verification_uri"` + // The verification uri with the user code appended for pre-filling form + VerificationURIComplete string `json:"verification_uri_complete"` + // The lifetime of the device code + ExpireTime int `json:"expires_in"` + // How often the device is allowed to poll to verify that the user login occurred + PollInterval int `json:"interval"` +} + +func (s *Server) getDeviceVerificationURI() string { + return path.Join(s.issuerURL.Path, "/device/auth/verify_code") +} + +func (s *Server) handleDeviceExchange(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + // Grab the parameter(s) from the query. + // If "user_code" is set, pre-populate the user code text field. + // If "invalid" is set, set the invalidAttempt boolean, which will display a message to the user that they + // attempted to redeem an invalid or expired user code. + userCode := r.URL.Query().Get("user_code") + invalidAttempt, err := strconv.ParseBool(r.URL.Query().Get("invalid")) + if err != nil { + invalidAttempt = false + } + if err := s.templates.device(r, w, s.getDeviceVerificationURI(), userCode, invalidAttempt); err != nil { + s.logger.Errorf("Server template error: %v", err) + s.renderError(r, w, http.StatusNotFound, "Page not found") + } + default: + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + } +} + +func (s *Server) handleDeviceCode(w http.ResponseWriter, r *http.Request) { + pollIntervalSeconds := 5 + + switch r.Method { + case http.MethodPost: + err := r.ParseForm() + if err != nil { + s.logger.Errorf("Could not parse Device Request body: %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusNotFound) + return + } + + // Get the client id and scopes from the post + clientID := r.Form.Get("client_id") + clientSecret := r.Form.Get("client_secret") + scopes := strings.Fields(r.Form.Get("scope")) + codeChallenge := r.Form.Get("code_challenge") + codeChallengeMethod := r.Form.Get("code_challenge_method") + + if codeChallengeMethod == "" { + codeChallengeMethod = codeChallengeMethodPlain + } + if codeChallengeMethod != codeChallengeMethodS256 && codeChallengeMethod != codeChallengeMethodPlain { + description := fmt.Sprintf("Unsupported PKCE challenge method (%q).", codeChallengeMethod) + s.tokenErrHelper(w, errInvalidRequest, description, http.StatusBadRequest) + return + } + + s.logger.Infof("Received device request for client %v with scopes %v", clientID, scopes) + + // Make device code + deviceCode := storage.NewDeviceCode() + + // make user code + userCode := storage.NewUserCode() + + // Generate the expire time + expireTime := time.Now().Add(s.deviceRequestsValidFor) + + // Store the Device Request + deviceReq := storage.DeviceRequest{ + UserCode: userCode, + DeviceCode: deviceCode, + ClientID: clientID, + ClientSecret: clientSecret, + Scopes: scopes, + Expiry: expireTime, + } + + if err := s.storage.CreateDeviceRequest(deviceReq); err != nil { + s.logger.Errorf("Failed to store device request; %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusInternalServerError) + return + } + + // Store the device token + deviceToken := storage.DeviceToken{ + DeviceCode: deviceCode, + Status: deviceTokenPending, + Expiry: expireTime, + LastRequestTime: s.now(), + PollIntervalSeconds: 0, + PKCE: storage.PKCE{ + CodeChallenge: codeChallenge, + CodeChallengeMethod: codeChallengeMethod, + }, + } + + if err := s.storage.CreateDeviceToken(deviceToken); err != nil { + s.logger.Errorf("Failed 
to store device token %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusInternalServerError) + return + } + + u, err := url.Parse(s.issuerURL.String()) + if err != nil { + s.logger.Errorf("Could not parse issuer URL %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusInternalServerError) + return + } + u.Path = path.Join(u.Path, "device") + vURI := u.String() + + q := u.Query() + q.Set("user_code", userCode) + u.RawQuery = q.Encode() + vURIComplete := u.String() + + code := deviceCodeResponse{ + DeviceCode: deviceCode, + UserCode: userCode, + VerificationURI: vURI, + VerificationURIComplete: vURIComplete, + ExpireTime: int(s.deviceRequestsValidFor.Seconds()), + PollInterval: pollIntervalSeconds, + } + + // Device Authorization Response can contain cache control header according to + // https://tools.ietf.org/html/rfc8628#section-3.2 + w.Header().Set("Cache-Control", "no-store") + + // Response type should be application/json according to + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + enc.Encode(code) + + default: + s.renderError(r, w, http.StatusBadRequest, "Invalid device code request type") + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusBadRequest) + } +} + +func (s *Server) handleDeviceTokenDeprecated(w http.ResponseWriter, r *http.Request) { + log.Deprecated(s.logger, `The /device/token endpoint was called. It will be removed, use /token instead.`) + + w.Header().Set("Content-Type", "application/json") + switch r.Method { + case http.MethodPost: + err := r.ParseForm() + if err != nil { + s.logger.Warnf("Could not parse Device Token Request body: %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusBadRequest) + return + } + + grantType := r.PostFormValue("grant_type") + if grantType != grantTypeDeviceCode { + s.tokenErrHelper(w, errInvalidGrant, "", http.StatusBadRequest) + return + } + + s.handleDeviceToken(w, r) + default: + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + } +} + +func (s *Server) handleDeviceToken(w http.ResponseWriter, r *http.Request) { + deviceCode := r.Form.Get("device_code") + if deviceCode == "" { + s.tokenErrHelper(w, errInvalidRequest, "No device code received", http.StatusBadRequest) + return + } + + now := s.now() + + // Grab the device token, check validity + deviceToken, err := s.storage.GetDeviceToken(deviceCode) + if err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get device code: %v", err) + } + s.tokenErrHelper(w, errInvalidRequest, "Invalid Device code.", http.StatusBadRequest) + return + } else if now.After(deviceToken.Expiry) { + s.tokenErrHelper(w, deviceTokenExpired, "", http.StatusBadRequest) + return + } + + // Rate Limiting check + slowDown := false + pollInterval := deviceToken.PollIntervalSeconds + minRequestTime := deviceToken.LastRequestTime.Add(time.Second * time.Duration(pollInterval)) + if now.Before(minRequestTime) { + slowDown = true + // Continually increase the poll interval until the user waits the proper time + pollInterval += 5 + } else { + pollInterval = 5 + } + + switch deviceToken.Status { + case deviceTokenPending: + updater := func(old storage.DeviceToken) (storage.DeviceToken, error) { + old.PollIntervalSeconds = pollInterval + old.LastRequestTime = now + return old, nil + } + // Update device token last request time in storage + if err := 
s.storage.UpdateDeviceToken(deviceCode, updater); err != nil { + s.logger.Errorf("failed to update device token: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "") + return + } + if slowDown { + s.tokenErrHelper(w, deviceTokenSlowDown, "", http.StatusBadRequest) + } else { + s.tokenErrHelper(w, deviceTokenPending, "", http.StatusUnauthorized) + } + case deviceTokenComplete: + codeChallengeFromStorage := deviceToken.PKCE.CodeChallenge + providedCodeVerifier := r.Form.Get("code_verifier") + + switch { + case providedCodeVerifier != "" && codeChallengeFromStorage != "": + calculatedCodeChallenge, err := s.calculateCodeChallenge(providedCodeVerifier, deviceToken.PKCE.CodeChallengeMethod) + if err != nil { + s.logger.Error(err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + if codeChallengeFromStorage != calculatedCodeChallenge { + s.tokenErrHelper(w, errInvalidGrant, "Invalid code_verifier.", http.StatusBadRequest) + return + } + case providedCodeVerifier != "": + // Received no code_challenge on /auth, but a code_verifier on /token + s.tokenErrHelper(w, errInvalidRequest, "No PKCE flow started. Cannot check code_verifier.", http.StatusBadRequest) + return + case codeChallengeFromStorage != "": + // Received PKCE request on /auth, but no code_verifier on /token + s.tokenErrHelper(w, errInvalidGrant, "Expecting parameter code_verifier in PKCE flow.", http.StatusBadRequest) + return + } + w.Write([]byte(deviceToken.Token)) + } +} + +func (s *Server) handleDeviceCallback(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + userCode := r.FormValue("state") + code := r.FormValue("code") + + if userCode == "" || code == "" { + s.renderError(r, w, http.StatusBadRequest, "Request was missing parameters") + return + } + + // Authorization redirect callback from OAuth2 auth flow. 
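handleDeviceCallback continues below. The deviceTokenComplete branch above compares a stored code_challenge against s.calculateCodeChallenge; that helper is not part of this hunk, but for the S256 method the transform is the standard RFC 7636 one: the unpadded base64url encoding of the SHA-256 of the verifier. A sketch (the constant-time comparison is a general precaution, not taken from the vendored code):

```go
package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/base64"
	"fmt"
)

// s256Challenge computes BASE64URL-ENCODE(SHA256(ASCII(code_verifier))),
// unpadded, per RFC 7636 section 4.2.
func s256Challenge(verifier string) string {
	sum := sha256.Sum256([]byte(verifier))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}

func main() {
	// Verifier/challenge pair from RFC 7636 Appendix B.
	verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
	challenge := s256Challenge(verifier)
	fmt.Println(challenge) // E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM

	// Compare the stored challenge against the recomputed one in
	// constant time.
	stored := "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"
	ok := subtle.ConstantTimeCompare([]byte(stored), []byte(challenge)) == 1
	fmt.Println(ok) // true
}
```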
+ if errMsg := r.FormValue("error"); errMsg != "" { + // escape the message to prevent cross-site scripting + msg := html.EscapeString(errMsg + ": " + r.FormValue("error_description")) + http.Error(w, msg, http.StatusBadRequest) + return + } + + authCode, err := s.storage.GetAuthCode(code) + if err != nil || s.now().After(authCode.Expiry) { + errCode := http.StatusBadRequest + if err != nil && err != storage.ErrNotFound { + s.logger.Errorf("failed to get auth code: %v", err) + errCode = http.StatusInternalServerError + } + s.renderError(r, w, errCode, "Invalid or expired auth code.") + return + } + + // Grab the device request from storage + deviceReq, err := s.storage.GetDeviceRequest(userCode) + if err != nil || s.now().After(deviceReq.Expiry) { + errCode := http.StatusBadRequest + if err != nil && err != storage.ErrNotFound { + s.logger.Errorf("failed to get device code: %v", err) + errCode = http.StatusInternalServerError + } + s.renderError(r, w, errCode, "Invalid or expired user code.") + return + } + + client, err := s.storage.GetClient(deviceReq.ClientID) + if err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get client: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + } else { + s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) + } + return + } + if client.Secret != deviceReq.ClientSecret { + s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) + return + } + + resp, err := s.exchangeAuthCode(w, authCode, client) + if err != nil { + s.logger.Errorf("Could not exchange auth code for client %q: %v", deviceReq.ClientID, err) + s.renderError(r, w, http.StatusInternalServerError, "Failed to exchange auth code.") + return + } + + // Grab the device token from storage + old, err := s.storage.GetDeviceToken(deviceReq.DeviceCode) + if err != nil || s.now().After(old.Expiry) { + errCode := http.StatusBadRequest + if err != nil && err != storage.ErrNotFound { + s.logger.Errorf("failed to get device token: %v", err) + errCode = http.StatusInternalServerError + } + s.renderError(r, w, errCode, "Invalid or expired device code.") + return + } + + updater := func(old storage.DeviceToken) (storage.DeviceToken, error) { + if old.Status == deviceTokenComplete { + return old, errors.New("device token already complete") + } + respStr, err := json.MarshalIndent(resp, "", " ") + if err != nil { + s.logger.Errorf("failed to marshal device token response: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "") + return old, err + } + + old.Token = string(respStr) + old.Status = deviceTokenComplete + return old, nil + } + + // Update refresh token in the storage, store the token and mark as complete + if err := s.storage.UpdateDeviceToken(deviceReq.DeviceCode, updater); err != nil { + s.logger.Errorf("failed to update device token: %v", err) + s.renderError(r, w, http.StatusBadRequest, "") + return + } + + if err := s.templates.deviceSuccess(r, w, client.Name); err != nil { + s.logger.Errorf("Server template error: %v", err) + s.renderError(r, w, http.StatusNotFound, "Page not found") + } + + default: + http.Error(w, fmt.Sprintf("method not implemented: %s", r.Method), http.StatusBadRequest) + return + } +} + +func (s *Server) verifyUserCode(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodPost: + err := r.ParseForm() + if err != nil { + s.logger.Warnf("Could not parse user code verification request body : %v", err) + 
s.renderError(r, w, http.StatusBadRequest, "") + return + } + + userCode := r.Form.Get("user_code") + if userCode == "" { + s.renderError(r, w, http.StatusBadRequest, "No user code received") + return + } + + userCode = strings.ToUpper(userCode) + + // Find the user code in the available requests + deviceRequest, err := s.storage.GetDeviceRequest(userCode) + if err != nil || s.now().After(deviceRequest.Expiry) { + if err != nil && err != storage.ErrNotFound { + s.logger.Errorf("failed to get device request: %v", err) + } + if err := s.templates.device(r, w, s.getDeviceVerificationURI(), userCode, true); err != nil { + s.logger.Errorf("Server template error: %v", err) + s.renderError(r, w, http.StatusNotFound, "Page not found") + } + return + } + + // Redirect to Dex Auth Endpoint + authURL := path.Join(s.issuerURL.Path, "/auth") + u, err := url.Parse(authURL) + if err != nil { + s.renderError(r, w, http.StatusInternalServerError, "Invalid auth URI.") + return + } + q := u.Query() + q.Set("client_id", deviceRequest.ClientID) + q.Set("client_secret", deviceRequest.ClientSecret) + q.Set("state", deviceRequest.UserCode) + q.Set("response_type", "code") + q.Set("redirect_uri", "/device/callback") + q.Set("scope", strings.Join(deviceRequest.Scopes, " ")) + u.RawQuery = q.Encode() + + http.Redirect(w, r, u.String(), http.StatusFound) + + default: + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + } +} diff --git a/vendor/github.com/dexidp/dex/server/doc.go b/vendor/github.com/dexidp/dex/server/doc.go new file mode 100644 index 00000000..0f662e5d --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/doc.go @@ -0,0 +1,2 @@ +// Package server implements an OpenID Connect server with federated logins. +package server diff --git a/vendor/github.com/dexidp/dex/server/handlers.go b/vendor/github.com/dexidp/dex/server/handlers.go new file mode 100644 index 00000000..9438d807 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/handlers.go @@ -0,0 +1,1469 @@ +package server + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/json" + "fmt" + "html/template" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/gorilla/mux" + jose "gopkg.in/square/go-jose.v2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/server/internal" + "github.com/dexidp/dex/storage" +) + +const ( + codeChallengeMethodPlain = "plain" + codeChallengeMethodS256 = "S256" +) + +func (s *Server) handlePublicKeys(w http.ResponseWriter, r *http.Request) { + // TODO(ericchiang): Cache this. 
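handlePublicKeys continues below. The redirect that verifyUserCode builds above routes every parameter through url.Values so escaping is handled uniformly; a self-contained sketch of the same construction (paths and parameter values here are placeholders, not dex defaults):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	u, err := url.Parse("/dex") // issuer path, assumed for illustration
	if err != nil {
		panic(err)
	}
	u.Path = path.Join(u.Path, "/auth")

	q := u.Query()
	q.Set("client_id", "example-app")
	q.Set("state", "WDJB-MJHT") // the user code doubles as the OAuth2 state
	q.Set("response_type", "code")
	q.Set("redirect_uri", "/device/callback")
	q.Set("scope", strings.Join([]string{"openid", "profile"}, " "))
	u.RawQuery = q.Encode()

	fmt.Println(u.String()) // /dex/auth?client_id=example-app&...
}
```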
+ keys, err := s.storage.GetKeys() + if err != nil { + s.logger.Errorf("failed to get keys: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") + return + } + + if keys.SigningKeyPub == nil { + s.logger.Errorf("No public keys found.") + s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") + return + } + + jwks := jose.JSONWebKeySet{ + Keys: make([]jose.JSONWebKey, len(keys.VerificationKeys)+1), + } + jwks.Keys[0] = *keys.SigningKeyPub + for i, verificationKey := range keys.VerificationKeys { + jwks.Keys[i+1] = *verificationKey.PublicKey + } + + data, err := json.MarshalIndent(jwks, "", " ") + if err != nil { + s.logger.Errorf("failed to marshal discovery data: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") + return + } + maxAge := keys.NextRotation.Sub(s.now()) + if maxAge < (time.Minute * 2) { + maxAge = time.Minute * 2 + } + + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, must-revalidate", int(maxAge.Seconds()))) + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(data))) + w.Write(data) +} + +type discovery struct { + Issuer string `json:"issuer"` + Auth string `json:"authorization_endpoint"` + Token string `json:"token_endpoint"` + Keys string `json:"jwks_uri"` + UserInfo string `json:"userinfo_endpoint"` + DeviceEndpoint string `json:"device_authorization_endpoint"` + GrantTypes []string `json:"grant_types_supported"` + ResponseTypes []string `json:"response_types_supported"` + Subjects []string `json:"subject_types_supported"` + IDTokenAlgs []string `json:"id_token_signing_alg_values_supported"` + CodeChallengeAlgs []string `json:"code_challenge_methods_supported"` + Scopes []string `json:"scopes_supported"` + AuthMethods []string `json:"token_endpoint_auth_methods_supported"` + Claims []string `json:"claims_supported"` +} + +func (s *Server) discoveryHandler() (http.HandlerFunc, error) { + d := discovery{ + Issuer: s.issuerURL.String(), + Auth: s.absURL("/auth"), + Token: s.absURL("/token"), + Keys: s.absURL("/keys"), + UserInfo: s.absURL("/userinfo"), + DeviceEndpoint: s.absURL("/device/code"), + Subjects: []string{"public"}, + IDTokenAlgs: []string{string(jose.RS256)}, + CodeChallengeAlgs: []string{codeChallengeMethodS256, codeChallengeMethodPlain}, + Scopes: []string{"openid", "email", "groups", "profile", "offline_access"}, + AuthMethods: []string{"client_secret_basic", "client_secret_post"}, + Claims: []string{ + "iss", "sub", "aud", "iat", "exp", "email", "email_verified", + "locale", "name", "preferred_username", "at_hash", + }, + } + + for responseType := range s.supportedResponseTypes { + d.ResponseTypes = append(d.ResponseTypes, responseType) + } + sort.Strings(d.ResponseTypes) + + d.GrantTypes = s.supportedGrantTypes + + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal discovery data: %v", err) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(data))) + w.Write(data) + }), nil +} + +// handleAuthorization handles the OAuth2 auth endpoint. 
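handleAuthorization itself follows. The discovery handler above marshals the document once at startup and then serves the cached bytes with an explicit Content-Length; a pared-down sketch of the same shape (field set trimmed, values invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A reduced version of the discovery struct above, showing the shape of
// the document served at /.well-known/openid-configuration.
type discovery struct {
	Issuer        string   `json:"issuer"`
	Auth          string   `json:"authorization_endpoint"`
	Token         string   `json:"token_endpoint"`
	Keys          string   `json:"jwks_uri"`
	ResponseTypes []string `json:"response_types_supported"`
}

func main() {
	d := discovery{
		Issuer:        "https://dex.example.com",
		Auth:          "https://dex.example.com/auth",
		Token:         "https://dex.example.com/token",
		Keys:          "https://dex.example.com/keys",
		ResponseTypes: []string{"code"},
	}
	// Marshaled once; the handler then writes these bytes on every request.
	data, err := json.MarshalIndent(d, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```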
+func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { + // Extract the arguments + if err := r.ParseForm(); err != nil { + s.logger.Errorf("Failed to parse arguments: %v", err) + + s.renderError(r, w, http.StatusBadRequest, err.Error()) + return + } + + connectorID := r.Form.Get("connector_id") + + connectors, err := s.storage.ListConnectors() + if err != nil { + s.logger.Errorf("Failed to get list of connectors: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve connector list.") + return + } + + // We don't need connector_id any more + r.Form.Del("connector_id") + + // Construct a URL with all of the arguments in its query + connURL := url.URL{ + RawQuery: r.Form.Encode(), + } + + // Redirect if a client chooses a specific connector_id + if connectorID != "" { + for _, c := range connectors { + if c.ID == connectorID { + connURL.Path = s.absPath("/auth", url.PathEscape(c.ID)) + http.Redirect(w, r, connURL.String(), http.StatusFound) + return + } + } + s.renderError(r, w, http.StatusBadRequest, "Connector ID does not match a valid Connector") + return + } + + if len(connectors) == 1 && !s.alwaysShowLogin { + connURL.Path = s.absPath("/auth", url.PathEscape(connectors[0].ID)) + http.Redirect(w, r, connURL.String(), http.StatusFound) + } + + connectorInfos := make([]connectorInfo, len(connectors)) + for index, conn := range connectors { + connURL.Path = s.absPath("/auth", url.PathEscape(conn.ID)) + connectorInfos[index] = connectorInfo{ + ID: conn.ID, + Name: conn.Name, + Type: conn.Type, + URL: template.URL(connURL.String()), + } + } + + if err := s.templates.login(r, w, connectorInfos); err != nil { + s.logger.Errorf("Server template error: %v", err) + } +} + +func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) { + authReq, err := s.parseAuthorizationRequest(r) + if err != nil { + s.logger.Errorf("Failed to parse authorization request: %v", err) + + switch authErr := err.(type) { + case *redirectedAuthErr: + authErr.Handler().ServeHTTP(w, r) + case *displayedAuthErr: + s.renderError(r, w, authErr.Status, err.Error()) + default: + panic("unsupported error type") + } + + return + } + + connID, err := url.PathUnescape(mux.Vars(r)["connector"]) + if err != nil { + s.logger.Errorf("Failed to parse connector: %v", err) + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist") + return + } + + conn, err := s.getConnector(connID) + if err != nil { + s.logger.Errorf("Failed to get connector: %v", err) + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist") + return + } + + // Set the connector being used for the login. + if authReq.ConnectorID != "" && authReq.ConnectorID != connID { + s.logger.Errorf("Mismatched connector ID in auth request: %s vs %s", + authReq.ConnectorID, connID) + s.renderError(r, w, http.StatusBadRequest, "Bad connector ID") + return + } + + authReq.ConnectorID = connID + + // Actually create the auth request + authReq.Expiry = s.now().Add(s.authRequestsValidFor) + if err := s.storage.CreateAuthRequest(*authReq); err != nil { + s.logger.Errorf("Failed to create authorization request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Failed to connect to the database.") + return + } + + scopes := parseScopes(authReq.Scopes) + + // Work out where the "Select another login method" link should go. 
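The handler continues below with the back-link construction. The connector picker above turns each connector ID into a path segment with url.PathEscape while carrying the original query string along; a sketch with invented IDs:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The surviving query parameters from the original /auth request.
	form := url.Values{"client_id": {"example-app"}, "response_type": {"code"}}

	// Connector IDs become a path segment, so they are path-escaped.
	for _, id := range []string{"ldap", "saml/corp"} {
		fmt.Println("/dex/auth/" + url.PathEscape(id) + "?" + form.Encode())
	}
	// Output:
	// /dex/auth/ldap?client_id=example-app&response_type=code
	// /dex/auth/saml%2Fcorp?client_id=example-app&response_type=code
}
```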
+ backLink := "" + if len(s.connectors) > 1 { + backLinkURL := url.URL{ + Path: s.absPath("/auth"), + RawQuery: r.Form.Encode(), + } + backLink = backLinkURL.String() + } + + switch r.Method { + case http.MethodGet: + switch conn := conn.Connector.(type) { + case connector.CallbackConnector: + // Use the auth request ID as the "state" token. + // + // TODO(ericchiang): Is this appropriate or should we also be using a nonce? + callbackURL, err := conn.LoginURL(scopes, s.absURL("/callback"), authReq.ID) + if err != nil { + s.logger.Errorf("Connector %q returned error when creating callback: %v", connID, err) + s.renderError(r, w, http.StatusInternalServerError, "Login error.") + return + } + http.Redirect(w, r, callbackURL, http.StatusFound) + case connector.PasswordConnector: + loginURL := url.URL{ + Path: s.absPath("/auth", connID, "login"), + } + q := loginURL.Query() + q.Set("state", authReq.ID) + q.Set("back", backLink) + loginURL.RawQuery = q.Encode() + + http.Redirect(w, r, loginURL.String(), http.StatusFound) + case connector.SAMLConnector: + action, value, err := conn.POSTData(scopes, authReq.ID) + if err != nil { + s.logger.Errorf("Creating SAML data: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Connector Login Error") + return + } + + // TODO(ericchiang): Don't inline this. + fmt.Fprintf(w, ` + + + + SAML login + + +
+ + + + + + `, action, value, authReq.ID) + default: + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + } + default: + s.renderError(r, w, http.StatusBadRequest, "Unsupported request method.") + } +} + +func (s *Server) handlePasswordLogin(w http.ResponseWriter, r *http.Request) { + authID := r.URL.Query().Get("state") + if authID == "" { + s.renderError(r, w, http.StatusBadRequest, "User session error.") + return + } + + backLink := r.URL.Query().Get("back") + + authReq, err := s.storage.GetAuthRequest(authID) + if err != nil { + if err == storage.ErrNotFound { + s.logger.Errorf("Invalid 'state' parameter provided: %v", err) + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + return + } + s.logger.Errorf("Failed to get auth request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Database error.") + return + } + + connID, err := url.PathUnescape(mux.Vars(r)["connector"]) + if err != nil { + s.logger.Errorf("Failed to parse connector: %v", err) + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist") + return + } else if connID != "" && connID != authReq.ConnectorID { + s.logger.Errorf("Connector mismatch: authentication started with id %q, but password login for id %q was triggered", authReq.ConnectorID, connID) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + conn, err := s.getConnector(authReq.ConnectorID) + if err != nil { + s.logger.Errorf("Failed to get connector with id %q : %v", authReq.ConnectorID, err) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + pwConn, ok := conn.Connector.(connector.PasswordConnector) + if !ok { + s.logger.Errorf("Expected password connector in handlePasswordLogin(), but got %v", pwConn) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + switch r.Method { + case http.MethodGet: + if err := s.templates.password(r, w, r.URL.String(), "", usernamePrompt(pwConn), false, backLink); err != nil { + s.logger.Errorf("Server template error: %v", err) + } + case http.MethodPost: + username := r.FormValue("login") + password := r.FormValue("password") + scopes := parseScopes(authReq.Scopes) + + identity, ok, err := pwConn.Login(r.Context(), scopes, username, password) + if err != nil { + s.logger.Errorf("Failed to login user: %v", err) + s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Login error: %v", err)) + return + } + if !ok { + if err := s.templates.password(r, w, r.URL.String(), username, usernamePrompt(pwConn), true, backLink); err != nil { + s.logger.Errorf("Server template error: %v", err) + } + return + } + redirectURL, canSkipApproval, err := s.finalizeLogin(identity, authReq, conn.Connector) + if err != nil { + s.logger.Errorf("Failed to finalize login: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Login error.") + return + } + + if canSkipApproval { + authReq, err = s.storage.GetAuthRequest(authReq.ID) + if err != nil { + s.logger.Errorf("Failed to get finalized auth request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Login error.") + return + } + s.sendCodeResponse(w, r, authReq) + return + } + + http.Redirect(w, r, redirectURL, http.StatusSeeOther) + default: + s.renderError(r, w, http.StatusBadRequest, "Unsupported request method.") + } +} + +func (s *Server) handleConnectorCallback(w http.ResponseWriter, r 
*http.Request) { + var authID string + switch r.Method { + case http.MethodGet: // OAuth2 callback + if authID = r.URL.Query().Get("state"); authID == "" { + s.renderError(r, w, http.StatusBadRequest, "User session error.") + return + } + case http.MethodPost: // SAML POST binding + if authID = r.PostFormValue("RelayState"); authID == "" { + s.renderError(r, w, http.StatusBadRequest, "User session error.") + return + } + default: + s.renderError(r, w, http.StatusBadRequest, "Method not supported") + return + } + + authReq, err := s.storage.GetAuthRequest(authID) + if err != nil { + if err == storage.ErrNotFound { + s.logger.Errorf("Invalid 'state' parameter provided: %v", err) + s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") + return + } + s.logger.Errorf("Failed to get auth request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Database error.") + return + } + + connID, err := url.PathUnescape(mux.Vars(r)["connector"]) + if err != nil { + s.logger.Errorf("Failed to get connector with id %q : %v", authReq.ConnectorID, err) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } else if connID != "" && connID != authReq.ConnectorID { + s.logger.Errorf("Connector mismatch: authentication started with id %q, but callback for id %q was triggered", authReq.ConnectorID, connID) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + conn, err := s.getConnector(authReq.ConnectorID) + if err != nil { + s.logger.Errorf("Failed to get connector with id %q : %v", authReq.ConnectorID, err) + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + var identity connector.Identity + switch conn := conn.Connector.(type) { + case connector.CallbackConnector: + if r.Method != http.MethodGet { + s.logger.Errorf("SAML request mapped to OAuth2 connector") + s.renderError(r, w, http.StatusBadRequest, "Invalid request") + return + } + identity, err = conn.HandleCallback(parseScopes(authReq.Scopes), r) + case connector.SAMLConnector: + if r.Method != http.MethodPost { + s.logger.Errorf("OAuth2 request mapped to SAML connector") + s.renderError(r, w, http.StatusBadRequest, "Invalid request") + return + } + identity, err = conn.HandlePOST(parseScopes(authReq.Scopes), r.PostFormValue("SAMLResponse"), authReq.ID) + default: + s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") + return + } + + if err != nil { + s.logger.Errorf("Failed to authenticate: %v", err) + s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Failed to authenticate: %v", err)) + return + } + + redirectURL, canSkipApproval, err := s.finalizeLogin(identity, authReq, conn.Connector) + if err != nil { + s.logger.Errorf("Failed to finalize login: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Login error.") + return + } + + if canSkipApproval { + authReq, err = s.storage.GetAuthRequest(authReq.ID) + if err != nil { + s.logger.Errorf("Failed to get finalized auth request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Login error.") + return + } + s.sendCodeResponse(w, r, authReq) + return + } + + http.Redirect(w, r, redirectURL, http.StatusSeeOther) +} + +// finalizeLogin associates the user's identity with the current AuthRequest, then returns +// the approval page's path. 
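+// The boolean result reports whether the approval screen can be skipped; in
+// that case the caller should fetch the finalized auth request and send the
+// code response directly.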
+func (s *Server) finalizeLogin(identity connector.Identity, authReq storage.AuthRequest, conn connector.Connector) (string, bool, error) { + claims := storage.Claims{ + UserID: identity.UserID, + Username: identity.Username, + PreferredUsername: identity.PreferredUsername, + Email: identity.Email, + EmailVerified: identity.EmailVerified, + Groups: identity.Groups, + } + + updater := func(a storage.AuthRequest) (storage.AuthRequest, error) { + a.LoggedIn = true + a.Claims = claims + a.ConnectorData = identity.ConnectorData + return a, nil + } + if err := s.storage.UpdateAuthRequest(authReq.ID, updater); err != nil { + return "", false, fmt.Errorf("failed to update auth request: %v", err) + } + + email := claims.Email + if !claims.EmailVerified { + email += " (unverified)" + } + + s.logger.Infof("login successful: connector %q, username=%q, preferred_username=%q, email=%q, groups=%q", + authReq.ConnectorID, claims.Username, claims.PreferredUsername, email, claims.Groups) + + // we can skip the redirect to /approval and go ahead and send code if it's not required + if s.skipApproval && !authReq.ForceApprovalPrompt { + return "", true, nil + } + + // an HMAC is used here to ensure that the request ID is unpredictable, ensuring that an attacker who intercepted the original + // flow would be unable to poll for the result at the /approval endpoint + h := hmac.New(sha256.New, authReq.HMACKey) + h.Write([]byte(authReq.ID)) + mac := h.Sum(nil) + + returnURL := path.Join(s.issuerURL.Path, "/approval") + "?req=" + authReq.ID + "&hmac=" + base64.RawURLEncoding.EncodeToString(mac) + _, ok := conn.(connector.RefreshConnector) + if !ok { + return returnURL, false, nil + } + + // Try to retrieve an existing OfflineSession object for the corresponding user. + session, err := s.storage.GetOfflineSessions(identity.UserID, authReq.ConnectorID) + if err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get offline session: %v", err) + return "", false, err + } + offlineSessions := storage.OfflineSessions{ + UserID: identity.UserID, + ConnID: authReq.ConnectorID, + Refresh: make(map[string]*storage.RefreshTokenRef), + ConnectorData: identity.ConnectorData, + } + + // Create a new OfflineSession object for the user and add a reference object for + // the newly received refreshtoken. + if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { + s.logger.Errorf("failed to create offline session: %v", err) + return "", false, err + } + + return returnURL, false, nil + } + + // Update existing OfflineSession obj with new RefreshTokenRef. 
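+	// (At this stage only the connector data is refreshed; the refresh token
+	// reference itself is written later, during the token exchange.)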
+ if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { + if len(identity.ConnectorData) > 0 { + old.ConnectorData = identity.ConnectorData + } + return old, nil + }); err != nil { + s.logger.Errorf("failed to update offline session: %v", err) + return "", false, err + } + + return returnURL, false, nil +} + +func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) { + macEncoded := r.FormValue("hmac") + if macEncoded == "" { + s.renderError(r, w, http.StatusUnauthorized, "Unauthorized request") + return + } + mac, err := base64.RawURLEncoding.DecodeString(macEncoded) + if err != nil { + s.renderError(r, w, http.StatusUnauthorized, "Unauthorized request") + return + } + + authReq, err := s.storage.GetAuthRequest(r.FormValue("req")) + if err != nil { + s.logger.Errorf("Failed to get auth request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Database error.") + return + } + if !authReq.LoggedIn { + s.logger.Errorf("Auth request does not have an identity for approval") + s.renderError(r, w, http.StatusInternalServerError, "Login process not yet finalized.") + return + } + + // build expected hmac with secret key + h := hmac.New(sha256.New, authReq.HMACKey) + h.Write([]byte(authReq.ID)) + expectedMAC := h.Sum(nil) + // constant time comparison + if !hmac.Equal(mac, expectedMAC) { + s.renderError(r, w, http.StatusUnauthorized, "Unauthorized request") + return + } + + switch r.Method { + case http.MethodGet: + // TODO: `finalizeLogin()` now sends code directly to client without going through this endpoint, + // the `if skipApproval { ... }` block needs to be removed after a grace period. + if s.skipApproval { + s.sendCodeResponse(w, r, authReq) + return + } + client, err := s.storage.GetClient(authReq.ClientID) + if err != nil { + s.logger.Errorf("Failed to get client %q: %v", authReq.ClientID, err) + s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve client.") + return + } + if err := s.templates.approval(r, w, authReq.ID, authReq.Claims.Username, client.Name, authReq.Scopes); err != nil { + s.logger.Errorf("Server template error: %v", err) + } + case http.MethodPost: + if r.FormValue("approval") != "approve" { + s.renderError(r, w, http.StatusInternalServerError, "Approval rejected.") + return + } + s.sendCodeResponse(w, r, authReq) + } +} + +func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authReq storage.AuthRequest) { + if s.now().After(authReq.Expiry) { + s.renderError(r, w, http.StatusBadRequest, "User session has expired.") + return + } + + if err := s.storage.DeleteAuthRequest(authReq.ID); err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("Failed to delete authorization request: %v", err) + s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") + } else { + s.renderError(r, w, http.StatusBadRequest, "User session error.") + } + return + } + u, err := url.Parse(authReq.RedirectURI) + if err != nil { + s.renderError(r, w, http.StatusInternalServerError, "Invalid redirect URI.") + return + } + + var ( + // Was the initial request using the implicit or hybrid flow instead of + // the "normal" code flow? + implicitOrHybrid = false + + // Only present in hybrid or code flow. code.ID == "" if this is not set. + code storage.AuthCode + + // ID token returned immediately if the response_type includes "id_token". + // Only valid for implicit and hybrid flows. 
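+		// Set by the responseTypeIDToken case below.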
+		idToken       string
+		idTokenExpiry time.Time
+
+		// Access token
+		accessToken string
+	)
+
+	for _, responseType := range authReq.ResponseTypes {
+		switch responseType {
+		case responseTypeCode:
+			code = storage.AuthCode{
+				ID:            storage.NewID(),
+				ClientID:      authReq.ClientID,
+				ConnectorID:   authReq.ConnectorID,
+				Nonce:         authReq.Nonce,
+				Scopes:        authReq.Scopes,
+				Claims:        authReq.Claims,
+				Expiry:        s.now().Add(time.Minute * 30),
+				RedirectURI:   authReq.RedirectURI,
+				ConnectorData: authReq.ConnectorData,
+				PKCE:          authReq.PKCE,
+			}
+			if err := s.storage.CreateAuthCode(code); err != nil {
+				s.logger.Errorf("Failed to create auth code: %v", err)
+				s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
+				return
+			}
+
+			// Implicit and hybrid flows that try to use the OOB redirect URI are
+			// rejected earlier. If we got here we're using the code flow.
+			if authReq.RedirectURI == redirectURIOOB {
+				if err := s.templates.oob(r, w, code.ID); err != nil {
+					s.logger.Errorf("Server template error: %v", err)
+				}
+				return
+			}
+		case responseTypeToken:
+			implicitOrHybrid = true
+		case responseTypeIDToken:
+			implicitOrHybrid = true
+			var err error
+
+			accessToken, _, err = s.newAccessToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, authReq.ConnectorID)
+			if err != nil {
+				s.logger.Errorf("failed to create new access token: %v", err)
+				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
+				return
+			}
+
+			idToken, idTokenExpiry, err = s.newIDToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, accessToken, code.ID, authReq.ConnectorID)
+			if err != nil {
+				s.logger.Errorf("failed to create ID token: %v", err)
+				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+
+	if implicitOrHybrid {
+		v := url.Values{}
+		v.Set("access_token", accessToken)
+		v.Set("token_type", "bearer")
+		v.Set("state", authReq.State)
+		if idToken != "" {
+			v.Set("id_token", idToken)
+			// The hybrid flow with only "code token" or "code id_token" doesn't return an
+			// "expires_in" value. If "code" was provided, indicating the hybrid flow,
+			// don't add it.
+			//
+			// https://openid.net/specs/openid-connect-core-1_0.html#HybridAuthResponse
+			if code.ID == "" {
+				v.Set("expires_in", strconv.Itoa(int(idTokenExpiry.Sub(s.now()).Seconds())))
+			}
+		}
+		if code.ID != "" {
+			v.Set("code", code.ID)
+		}
+
+		// Implicit and hybrid flows return their values as part of the fragment.
+		//
+		//   HTTP/1.1 303 See Other
+		//   Location: https://client.example.org/cb#
+		//     access_token=SlAV32hkKG
+		//     &token_type=bearer
+		//     &id_token=eyJ0 ... NiJ9.eyJ1c ... I6IjIifX0.DeWt4Qu ... ZXso
+		//     &expires_in=3600
+		//     &state=af0ifjsldkj
+		//
+		u.Fragment = v.Encode()
+	} else {
+		// The code flow adds values to the URL query.
+		//
+		//   HTTP/1.1 303 See Other
+		//   Location: https://client.example.org/cb?
+ // code=SplxlOBeZQQYbYS6WxSbIA + // &state=af0ifjsldkj + // + q := u.Query() + q.Set("code", code.ID) + q.Set("state", authReq.State) + u.RawQuery = q.Encode() + } + + http.Redirect(w, r, u.String(), http.StatusSeeOther) +} + +func (s *Server) withClientFromStorage(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, storage.Client)) { + clientID, clientSecret, ok := r.BasicAuth() + if ok { + var err error + if clientID, err = url.QueryUnescape(clientID); err != nil { + s.tokenErrHelper(w, errInvalidRequest, "client_id improperly encoded", http.StatusBadRequest) + return + } + if clientSecret, err = url.QueryUnescape(clientSecret); err != nil { + s.tokenErrHelper(w, errInvalidRequest, "client_secret improperly encoded", http.StatusBadRequest) + return + } + } else { + clientID = r.PostFormValue("client_id") + clientSecret = r.PostFormValue("client_secret") + } + + client, err := s.storage.GetClient(clientID) + if err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get client: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + } else { + s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) + } + return + } + + if subtle.ConstantTimeCompare([]byte(client.Secret), []byte(clientSecret)) != 1 { + if clientSecret == "" { + s.logger.Infof("missing client_secret on token request for client: %s", client.ID) + } else { + s.logger.Infof("invalid client_secret on token request for client: %s", client.ID) + } + s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) + return + } + + handler(w, r, client) +} + +func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + if r.Method != http.MethodPost { + s.tokenErrHelper(w, errInvalidRequest, "method not allowed", http.StatusBadRequest) + return + } + + err := r.ParseForm() + if err != nil { + s.logger.Errorf("Could not parse request body: %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusBadRequest) + return + } + + grantType := r.PostFormValue("grant_type") + if !contains(s.supportedGrantTypes, grantType) { + s.logger.Errorf("unsupported grant type: %v", grantType) + s.tokenErrHelper(w, errUnsupportedGrantType, "", http.StatusBadRequest) + return + } + switch grantType { + case grantTypeDeviceCode: + s.handleDeviceToken(w, r) + case grantTypeAuthorizationCode: + s.withClientFromStorage(w, r, s.handleAuthCode) + case grantTypeRefreshToken: + s.withClientFromStorage(w, r, s.handleRefreshToken) + case grantTypePassword: + s.withClientFromStorage(w, r, s.handlePasswordGrant) + case grantTypeTokenExchange: + s.withClientFromStorage(w, r, s.handleTokenExchange) + default: + s.tokenErrHelper(w, errUnsupportedGrantType, "", http.StatusBadRequest) + } +} + +func (s *Server) calculateCodeChallenge(codeVerifier, codeChallengeMethod string) (string, error) { + switch codeChallengeMethod { + case codeChallengeMethodPlain: + return codeVerifier, nil + case codeChallengeMethodS256: + shaSum := sha256.Sum256([]byte(codeVerifier)) + return base64.RawURLEncoding.EncodeToString(shaSum[:]), nil + default: + return "", fmt.Errorf("unknown challenge method (%v)", codeChallengeMethod) + } +} + +// handle an access token request https://tools.ietf.org/html/rfc6749#section-4.1.3 +func (s *Server) handleAuthCode(w http.ResponseWriter, r *http.Request, client storage.Client) { + code := r.PostFormValue("code") + redirectURI 
:= r.PostFormValue("redirect_uri") + + if code == "" { + s.tokenErrHelper(w, errInvalidRequest, `Required param: code.`, http.StatusBadRequest) + return + } + + authCode, err := s.storage.GetAuthCode(code) + if err != nil || s.now().After(authCode.Expiry) || authCode.ClientID != client.ID { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get auth code: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + } else { + s.tokenErrHelper(w, errInvalidGrant, "Invalid or expired code parameter.", http.StatusBadRequest) + } + return + } + + // RFC 7636 (PKCE) + codeChallengeFromStorage := authCode.PKCE.CodeChallenge + providedCodeVerifier := r.PostFormValue("code_verifier") + + switch { + case providedCodeVerifier != "" && codeChallengeFromStorage != "": + calculatedCodeChallenge, err := s.calculateCodeChallenge(providedCodeVerifier, authCode.PKCE.CodeChallengeMethod) + if err != nil { + s.logger.Error(err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + if codeChallengeFromStorage != calculatedCodeChallenge { + s.tokenErrHelper(w, errInvalidGrant, "Invalid code_verifier.", http.StatusBadRequest) + return + } + case providedCodeVerifier != "": + // Received no code_challenge on /auth, but a code_verifier on /token + s.tokenErrHelper(w, errInvalidRequest, "No PKCE flow started. Cannot check code_verifier.", http.StatusBadRequest) + return + case codeChallengeFromStorage != "": + // Received PKCE request on /auth, but no code_verifier on /token + s.tokenErrHelper(w, errInvalidGrant, "Expecting parameter code_verifier in PKCE flow.", http.StatusBadRequest) + return + } + + if authCode.RedirectURI != redirectURI { + s.tokenErrHelper(w, errInvalidRequest, "redirect_uri did not match URI from initial request.", http.StatusBadRequest) + return + } + + tokenResponse, err := s.exchangeAuthCode(w, authCode, client) + if err != nil { + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + s.writeAccessToken(w, tokenResponse) +} + +func (s *Server) exchangeAuthCode(w http.ResponseWriter, authCode storage.AuthCode, client storage.Client) (*accessTokenResponse, error) { + accessToken, _, err := s.newAccessToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, authCode.ConnectorID) + if err != nil { + s.logger.Errorf("failed to create new access token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return nil, err + } + + idToken, expiry, err := s.newIDToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, accessToken, authCode.ID, authCode.ConnectorID) + if err != nil { + s.logger.Errorf("failed to create ID token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return nil, err + } + + if err := s.storage.DeleteAuthCode(authCode.ID); err != nil { + s.logger.Errorf("failed to delete auth code: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return nil, err + } + + reqRefresh := func() bool { + // Ensure the connector supports refresh tokens. + // + // Connectors like `saml` do not implement RefreshConnector. 
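+		// Even with a refresh-capable connector, a refresh token is only
+		// issued when the client requested the offline_access scope.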
+ conn, err := s.getConnector(authCode.ConnectorID) + if err != nil { + s.logger.Errorf("connector with ID %q not found: %v", authCode.ConnectorID, err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return false + } + + _, ok := conn.Connector.(connector.RefreshConnector) + if !ok { + return false + } + + for _, scope := range authCode.Scopes { + if scope == scopeOfflineAccess { + return true + } + } + return false + }() + var refreshToken string + if reqRefresh { + refresh := storage.RefreshToken{ + ID: storage.NewID(), + Token: storage.NewID(), + ClientID: authCode.ClientID, + ConnectorID: authCode.ConnectorID, + Scopes: authCode.Scopes, + Claims: authCode.Claims, + Nonce: authCode.Nonce, + ConnectorData: authCode.ConnectorData, + CreatedAt: s.now(), + LastUsed: s.now(), + } + token := &internal.RefreshToken{ + RefreshId: refresh.ID, + Token: refresh.Token, + } + if refreshToken, err = internal.Marshal(token); err != nil { + s.logger.Errorf("failed to marshal refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return nil, err + } + + if err := s.storage.CreateRefresh(refresh); err != nil { + s.logger.Errorf("failed to create refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return nil, err + } + + // deleteToken determines if we need to delete the newly created refresh token + // due to a failure in updating/creating the OfflineSession object for the + // corresponding user. + var deleteToken bool + defer func() { + if deleteToken { + // Delete newly created refresh token from storage. + if err := s.storage.DeleteRefresh(refresh.ID); err != nil { + s.logger.Errorf("failed to delete refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + } + }() + + tokenRef := storage.RefreshTokenRef{ + ID: refresh.ID, + ClientID: refresh.ClientID, + CreatedAt: refresh.CreatedAt, + LastUsed: refresh.LastUsed, + } + + // Try to retrieve an existing OfflineSession object for the corresponding user. + if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return nil, err + } + offlineSessions := storage.OfflineSessions{ + UserID: refresh.Claims.UserID, + ConnID: refresh.ConnectorID, + Refresh: make(map[string]*storage.RefreshTokenRef), + } + offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef + + // Create a new OfflineSession object for the user and add a reference object for + // the newly received refreshtoken. + if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { + s.logger.Errorf("failed to create offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return nil, err + } + } else { + if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok { + // Delete old refresh token from storage. + if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil && err != storage.ErrNotFound { + s.logger.Errorf("failed to delete refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return nil, err + } + } + + // Update existing OfflineSession obj with new RefreshTokenRef. 
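+		// The map is keyed by client ID, so each client keeps at most one
+		// live refresh token per user/connector pair.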
+ if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { + old.Refresh[tokenRef.ClientID] = &tokenRef + return old, nil + }); err != nil { + s.logger.Errorf("failed to update offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return nil, err + } + } + } + return s.toAccessTokenResponse(idToken, accessToken, refreshToken, expiry), nil +} + +func (s *Server) handleUserInfo(w http.ResponseWriter, r *http.Request) { + const prefix = "Bearer " + + auth := r.Header.Get("authorization") + if len(auth) < len(prefix) || !strings.EqualFold(prefix, auth[:len(prefix)]) { + w.Header().Set("WWW-Authenticate", "Bearer") + s.tokenErrHelper(w, errAccessDenied, "Invalid bearer token.", http.StatusUnauthorized) + return + } + rawIDToken := auth[len(prefix):] + + verifier := oidc.NewVerifier(s.issuerURL.String(), &storageKeySet{s.storage}, &oidc.Config{SkipClientIDCheck: true}) + idToken, err := verifier.Verify(r.Context(), rawIDToken) + if err != nil { + s.tokenErrHelper(w, errAccessDenied, err.Error(), http.StatusForbidden) + return + } + + var claims json.RawMessage + if err := idToken.Claims(&claims); err != nil { + s.tokenErrHelper(w, errServerError, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(claims) +} + +func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, client storage.Client) { + // Parse the fields + if err := r.ParseForm(); err != nil { + s.tokenErrHelper(w, errInvalidRequest, "Couldn't parse data", http.StatusBadRequest) + return + } + q := r.Form + + nonce := q.Get("nonce") + // Some clients, like the old go-oidc, provide extra whitespace. Tolerate this. 
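+	// strings.Fields splits on any run of whitespace, so the extra padding
+	// is dropped without producing empty scope entries.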
+	scopes := strings.Fields(q.Get("scope"))
+
+	// Parse the scopes if they are passed
+	var (
+		unrecognized  []string
+		invalidScopes []string
+	)
+	hasOpenIDScope := false
+	for _, scope := range scopes {
+		switch scope {
+		case scopeOpenID:
+			hasOpenIDScope = true
+		case scopeOfflineAccess, scopeEmail, scopeProfile, scopeGroups, scopeFederatedID:
+		default:
+			peerID, ok := parseCrossClientScope(scope)
+			if !ok {
+				unrecognized = append(unrecognized, scope)
+				continue
+			}
+
+			isTrusted, err := s.validateCrossClientTrust(client.ID, peerID)
+			if err != nil {
+				s.tokenErrHelper(w, errInvalidClient, fmt.Sprintf("Error validating cross client trust %v.", err), http.StatusBadRequest)
+				return
+			}
+			if !isTrusted {
+				invalidScopes = append(invalidScopes, scope)
+			}
+		}
+	}
+	if !hasOpenIDScope {
+		s.tokenErrHelper(w, errInvalidRequest, `Missing required scope(s) ["openid"].`, http.StatusBadRequest)
+		return
+	}
+	if len(unrecognized) > 0 {
+		s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Unrecognized scope(s) %q", unrecognized), http.StatusBadRequest)
+		return
+	}
+	if len(invalidScopes) > 0 {
+		s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Client can't request scope(s) %q", invalidScopes), http.StatusBadRequest)
+		return
+	}
+
+	// Which connector
+	connID := s.passwordConnector
+	conn, err := s.getConnector(connID)
+	if err != nil {
+		s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest)
+		return
+	}
+
+	passwordConnector, ok := conn.Connector.(connector.PasswordConnector)
+	if !ok {
+		s.tokenErrHelper(w, errInvalidRequest, "Requested password connector is not the correct type.", http.StatusBadRequest)
+		return
+	}
+
+	// Login
+	username := q.Get("username")
+	password := q.Get("password")
+	identity, ok, err := passwordConnector.Login(r.Context(), parseScopes(scopes), username, password)
+	if err != nil {
+		s.logger.Errorf("Failed to log in user: %v", err)
+		s.tokenErrHelper(w, errInvalidRequest, "Could not log in user", http.StatusBadRequest)
+		return
+	}
+	if !ok {
+		s.tokenErrHelper(w, errAccessDenied, "Invalid username or password", http.StatusUnauthorized)
+		return
+	}
+
+	// Build the claims to send in the ID token
+	claims := storage.Claims{
+		UserID:            identity.UserID,
+		Username:          identity.Username,
+		PreferredUsername: identity.PreferredUsername,
+		Email:             identity.Email,
+		EmailVerified:     identity.EmailVerified,
+		Groups:            identity.Groups,
+	}
+
+	accessToken, _, err := s.newAccessToken(client.ID, claims, scopes, nonce, connID)
+	if err != nil {
+		s.logger.Errorf("password grant failed to create new access token: %v", err)
+		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
+		return
+	}
+
+	idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, nonce, accessToken, "", connID)
+	if err != nil {
+		s.logger.Errorf("password grant failed to create new ID token: %v", err)
+		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
+		return
+	}
+
+	reqRefresh := func() bool {
+		// Ensure the connector supports refresh tokens.
+		//
+		// Connectors like `saml` do not implement RefreshConnector.
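+		// Refresh is gated on the offline_access scope, the same as in the
+		// authorization-code flow above.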
+ _, ok := conn.Connector.(connector.RefreshConnector) + if !ok { + return false + } + + for _, scope := range scopes { + if scope == scopeOfflineAccess { + return true + } + } + return false + }() + var refreshToken string + if reqRefresh { + refresh := storage.RefreshToken{ + ID: storage.NewID(), + Token: storage.NewID(), + ClientID: client.ID, + ConnectorID: connID, + Scopes: scopes, + Claims: claims, + Nonce: nonce, + // ConnectorData: authCode.ConnectorData, + CreatedAt: s.now(), + LastUsed: s.now(), + } + token := &internal.RefreshToken{ + RefreshId: refresh.ID, + Token: refresh.Token, + } + if refreshToken, err = internal.Marshal(token); err != nil { + s.logger.Errorf("failed to marshal refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + + if err := s.storage.CreateRefresh(refresh); err != nil { + s.logger.Errorf("failed to create refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + + // deleteToken determines if we need to delete the newly created refresh token + // due to a failure in updating/creating the OfflineSession object for the + // corresponding user. + var deleteToken bool + defer func() { + if deleteToken { + // Delete newly created refresh token from storage. + if err := s.storage.DeleteRefresh(refresh.ID); err != nil { + s.logger.Errorf("failed to delete refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + } + }() + + tokenRef := storage.RefreshTokenRef{ + ID: refresh.ID, + ClientID: refresh.ClientID, + CreatedAt: refresh.CreatedAt, + LastUsed: refresh.LastUsed, + } + + // Try to retrieve an existing OfflineSession object for the corresponding user. + if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil { + if err != storage.ErrNotFound { + s.logger.Errorf("failed to get offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return + } + offlineSessions := storage.OfflineSessions{ + UserID: refresh.Claims.UserID, + ConnID: refresh.ConnectorID, + Refresh: make(map[string]*storage.RefreshTokenRef), + ConnectorData: identity.ConnectorData, + } + offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef + + // Create a new OfflineSession object for the user and add a reference object for + // the newly received refreshtoken. + if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { + s.logger.Errorf("failed to create offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return + } + } else { + if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok { + // Delete old refresh token from storage. + if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil { + if err == storage.ErrNotFound { + s.logger.Warnf("database inconsistent, refresh token missing: %v", oldTokenRef.ID) + } else { + s.logger.Errorf("failed to delete refresh token: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return + } + } + } + + // Update existing OfflineSession obj with new RefreshTokenRef. 
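+			// Unlike the auth-code path, the password grant also refreshes the
+			// stored connector data from the identity returned by Login.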
+ if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { + old.Refresh[tokenRef.ClientID] = &tokenRef + old.ConnectorData = identity.ConnectorData + return old, nil + }); err != nil { + s.logger.Errorf("failed to update offline session: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + deleteToken = true + return + } + } + } + + resp := s.toAccessTokenResponse(idToken, accessToken, refreshToken, expiry) + s.writeAccessToken(w, resp) +} + +func (s *Server) handleTokenExchange(w http.ResponseWriter, r *http.Request, client storage.Client) { + ctx := r.Context() + + if err := r.ParseForm(); err != nil { + s.logger.Errorf("could not parse request body: %v", err) + s.tokenErrHelper(w, errInvalidRequest, "", http.StatusBadRequest) + return + } + q := r.Form + + scopes := strings.Fields(q.Get("scope")) // OPTIONAL, map to issued token scope + requestedTokenType := q.Get("requested_token_type") // OPTIONAL, default to access token + if requestedTokenType == "" { + requestedTokenType = tokenTypeAccess + } + subjectToken := q.Get("subject_token") // REQUIRED + subjectTokenType := q.Get("subject_token_type") // REQUIRED + connID := q.Get("connector_id") // REQUIRED, not in RFC + + switch subjectTokenType { + case tokenTypeID, tokenTypeAccess: // ok, continue + default: + s.tokenErrHelper(w, errRequestNotSupported, "Invalid subject_token_type.", http.StatusBadRequest) + return + } + + if subjectToken == "" { + s.tokenErrHelper(w, errInvalidRequest, "Missing subject_token", http.StatusBadRequest) + return + } + + conn, err := s.getConnector(connID) + if err != nil { + s.logger.Errorf("failed to get connector: %v", err) + s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest) + return + } + teConn, ok := conn.Connector.(connector.TokenIdentityConnector) + if !ok { + s.logger.Errorf("connector doesn't implement token exchange: %v", connID) + s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest) + return + } + identity, err := teConn.TokenIdentity(ctx, subjectTokenType, subjectToken) + if err != nil { + s.logger.Errorf("failed to verify subject token: %v", err) + s.tokenErrHelper(w, errAccessDenied, "", http.StatusUnauthorized) + return + } + + claims := storage.Claims{ + UserID: identity.UserID, + Username: identity.Username, + PreferredUsername: identity.PreferredUsername, + Email: identity.Email, + EmailVerified: identity.EmailVerified, + Groups: identity.Groups, + } + resp := accessTokenResponse{ + IssuedTokenType: requestedTokenType, + TokenType: "bearer", + } + var expiry time.Time + switch requestedTokenType { + case tokenTypeID: + resp.AccessToken, expiry, err = s.newIDToken(client.ID, claims, scopes, "", "", "", connID) + case tokenTypeAccess: + resp.AccessToken, expiry, err = s.newAccessToken(client.ID, claims, scopes, "", connID) + default: + s.tokenErrHelper(w, errRequestNotSupported, "Invalid requested_token_type.", http.StatusBadRequest) + return + } + if err != nil { + s.logger.Errorf("token exchange failed to create new %v token: %v", requestedTokenType, err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + resp.ExpiresIn = int(time.Until(expiry).Seconds()) + + // Token response must include cache headers https://tools.ietf.org/html/rfc6749#section-5.1 + w.Header().Set("Cache-Control", "no-store") + w.Header().Set("Pragma", "no-cache") + 
w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) +} + +type accessTokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type,omitempty"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + IDToken string `json:"id_token,omitempty"` + Scope string `json:"scope,omitempty"` +} + +func (s *Server) toAccessTokenResponse(idToken, accessToken, refreshToken string, expiry time.Time) *accessTokenResponse { + return &accessTokenResponse{ + AccessToken: accessToken, + TokenType: "bearer", + ExpiresIn: int(expiry.Sub(s.now()).Seconds()), + RefreshToken: refreshToken, + IDToken: idToken, + } +} + +func (s *Server) writeAccessToken(w http.ResponseWriter, resp *accessTokenResponse) { + data, err := json.Marshal(resp) + if err != nil { + s.logger.Errorf("failed to marshal access token response: %v", err) + s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(data))) + + // Token response must include cache headers https://tools.ietf.org/html/rfc6749#section-5.1 + w.Header().Set("Cache-Control", "no-store") + w.Header().Set("Pragma", "no-cache") + w.Write(data) +} + +func (s *Server) renderError(r *http.Request, w http.ResponseWriter, status int, description string) { + if err := s.templates.err(r, w, status, description); err != nil { + s.logger.Errorf("server template error: %v", err) + } +} + +func (s *Server) tokenErrHelper(w http.ResponseWriter, typ string, description string, statusCode int) { + if err := tokenErr(w, typ, description, statusCode); err != nil { + s.logger.Errorf("token error response: %v", err) + } +} + +// Check for username prompt override from connector. Defaults to "Username". +func usernamePrompt(conn connector.PasswordConnector) string { + if attr := conn.Prompt(); attr != "" { + return attr + } + return "Username" +} diff --git a/vendor/github.com/dexidp/dex/server/internal/BUILD b/vendor/github.com/dexidp/dex/server/internal/BUILD new file mode 100644 index 00000000..59048adb --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/internal/BUILD @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "internal", + srcs = [ + "codec.go", + "types.pb.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/server/internal", + importpath = "github.com/dexidp/dex/server/internal", + visibility = [ + "//third_party:__subpackages__", + "//vendor/github.com/dexidp/dex/server:__subpackages__", + ], + deps = [ + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + ], +) diff --git a/vendor/github.com/dexidp/dex/server/internal/codec.go b/vendor/github.com/dexidp/dex/server/internal/codec.go new file mode 100644 index 00000000..45bdbca1 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/internal/codec.go @@ -0,0 +1,25 @@ +package internal + +import ( + "encoding/base64" + + "google.golang.org/protobuf/proto" +) + +// Marshal converts a protobuf message to a URL legal string. +func Marshal(message proto.Message) (string, error) { + data, err := proto.Marshal(message) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(data), nil +} + +// Unmarshal decodes a protobuf message. 
+func Unmarshal(s string, message proto.Message) error { + data, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + return err + } + return proto.Unmarshal(data, message) +} diff --git a/vendor/github.com/dexidp/dex/server/internal/types.pb.go b/vendor/github.com/dexidp/dex/server/internal/types.pb.go new file mode 100644 index 00000000..211839bf --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/internal/types.pb.go @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: server/internal/types.proto + +// Package internal holds protobuf types used by the server. + +package internal + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// RefreshToken is a message that holds refresh token data used by dex. +type RefreshToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RefreshId string `protobuf:"bytes,1,opt,name=refresh_id,json=refreshId,proto3" json:"refresh_id,omitempty"` + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` +} + +func (x *RefreshToken) Reset() { + *x = RefreshToken{} + if protoimpl.UnsafeEnabled { + mi := &file_server_internal_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RefreshToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RefreshToken) ProtoMessage() {} + +func (x *RefreshToken) ProtoReflect() protoreflect.Message { + mi := &file_server_internal_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RefreshToken.ProtoReflect.Descriptor instead. +func (*RefreshToken) Descriptor() ([]byte, []int) { + return file_server_internal_types_proto_rawDescGZIP(), []int{0} +} + +func (x *RefreshToken) GetRefreshId() string { + if x != nil { + return x.RefreshId + } + return "" +} + +func (x *RefreshToken) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +// IDTokenSubject represents both the userID and connID which is returned +// as the "sub" claim in the ID Token. 
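+// The marshaled form keeps the subject compact and URL-safe (see codec.go).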
+type IDTokenSubject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + ConnId string `protobuf:"bytes,2,opt,name=conn_id,json=connId,proto3" json:"conn_id,omitempty"` +} + +func (x *IDTokenSubject) Reset() { + *x = IDTokenSubject{} + if protoimpl.UnsafeEnabled { + mi := &file_server_internal_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IDTokenSubject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IDTokenSubject) ProtoMessage() {} + +func (x *IDTokenSubject) ProtoReflect() protoreflect.Message { + mi := &file_server_internal_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IDTokenSubject.ProtoReflect.Descriptor instead. +func (*IDTokenSubject) Descriptor() ([]byte, []int) { + return file_server_internal_types_proto_rawDescGZIP(), []int{1} +} + +func (x *IDTokenSubject) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *IDTokenSubject) GetConnId() string { + if x != nil { + return x.ConnId + } + return "" +} + +var File_server_internal_types_proto protoreflect.FileDescriptor + +var file_server_internal_types_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x22, 0x43, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0e, + 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x64, + 0x42, 0x27, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, + 0x65, 0x78, 0x69, 0x64, 0x70, 0x2f, 0x64, 0x65, 0x78, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_server_internal_types_proto_rawDescOnce sync.Once + file_server_internal_types_proto_rawDescData = file_server_internal_types_proto_rawDesc +) + +func file_server_internal_types_proto_rawDescGZIP() []byte { + file_server_internal_types_proto_rawDescOnce.Do(func() { + file_server_internal_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_server_internal_types_proto_rawDescData) + }) + return file_server_internal_types_proto_rawDescData +} + +var file_server_internal_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_server_internal_types_proto_goTypes = []interface{}{ + 
(*RefreshToken)(nil), // 0: internal.RefreshToken + (*IDTokenSubject)(nil), // 1: internal.IDTokenSubject +} +var file_server_internal_types_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_server_internal_types_proto_init() } +func file_server_internal_types_proto_init() { + if File_server_internal_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_server_internal_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_server_internal_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IDTokenSubject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_server_internal_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_server_internal_types_proto_goTypes, + DependencyIndexes: file_server_internal_types_proto_depIdxs, + MessageInfos: file_server_internal_types_proto_msgTypes, + }.Build() + File_server_internal_types_proto = out.File + file_server_internal_types_proto_rawDesc = nil + file_server_internal_types_proto_goTypes = nil + file_server_internal_types_proto_depIdxs = nil +} diff --git a/vendor/github.com/dexidp/dex/server/internal/types.proto b/vendor/github.com/dexidp/dex/server/internal/types.proto new file mode 100644 index 00000000..6a949fe8 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/internal/types.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +// Package internal holds protobuf types used by the server. +package internal; + +option go_package = "github.com/dexidp/dex/server/internal"; + +// RefreshToken is a message that holds refresh token data used by dex. +message RefreshToken { + string refresh_id = 1; + string token = 2; +} + +// IDTokenSubject represents both the userID and connID which is returned +// as the "sub" claim in the ID Token. +message IDTokenSubject { + string user_id = 1; + string conn_id = 2; +} diff --git a/vendor/github.com/dexidp/dex/server/oauth2.go b/vendor/github.com/dexidp/dex/server/oauth2.go new file mode 100644 index 00000000..b72431e0 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/oauth2.go @@ -0,0 +1,702 @@ +package server + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + jose "gopkg.in/square/go-jose.v2" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/server/internal" + "github.com/dexidp/dex/storage" +) + +// TODO(ericchiang): clean this file up and figure out more idiomatic error handling. 
+ +// See: https://tools.ietf.org/html/rfc6749#section-4.1.2.1 + +// displayedAuthErr is an error that should be displayed to the user as a web page +type displayedAuthErr struct { + Status int + Description string +} + +func (err *displayedAuthErr) Error() string { + return err.Description +} + +func newDisplayedErr(status int, format string, a ...interface{}) *displayedAuthErr { + return &displayedAuthErr{status, fmt.Sprintf(format, a...)} +} + +// redirectedAuthErr is an error that should be reported back to the client by 302 redirect +type redirectedAuthErr struct { + State string + RedirectURI string + Type string + Description string +} + +func (err *redirectedAuthErr) Error() string { + return err.Description +} + +func (err *redirectedAuthErr) Handler() http.Handler { + hf := func(w http.ResponseWriter, r *http.Request) { + v := url.Values{} + v.Add("state", err.State) + v.Add("error", err.Type) + if err.Description != "" { + v.Add("error_description", err.Description) + } + var redirectURI string + if strings.Contains(err.RedirectURI, "?") { + redirectURI = err.RedirectURI + "&" + v.Encode() + } else { + redirectURI = err.RedirectURI + "?" + v.Encode() + } + http.Redirect(w, r, redirectURI, http.StatusSeeOther) + } + return http.HandlerFunc(hf) +} + +func tokenErr(w http.ResponseWriter, typ, description string, statusCode int) error { + data := struct { + Error string `json:"error"` + Description string `json:"error_description,omitempty"` + }{typ, description} + body, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("failed to marshal token error response: %v", err) + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(statusCode) + w.Write(body) + return nil +} + +const ( + errInvalidRequest = "invalid_request" + errUnauthorizedClient = "unauthorized_client" + errAccessDenied = "access_denied" + errUnsupportedResponseType = "unsupported_response_type" + errRequestNotSupported = "request_not_supported" + errInvalidScope = "invalid_scope" + errServerError = "server_error" + errTemporarilyUnavailable = "temporarily_unavailable" + errUnsupportedGrantType = "unsupported_grant_type" + errInvalidGrant = "invalid_grant" + errInvalidClient = "invalid_client" +) + +const ( + scopeOfflineAccess = "offline_access" // Request a refresh token. 
+ scopeOpenID = "openid" + scopeGroups = "groups" + scopeEmail = "email" + scopeProfile = "profile" + scopeFederatedID = "federated:id" + scopeCrossClientPrefix = "audience:server:client_id:" +) + +const ( + deviceCallbackURI = "/device/callback" +) + +const ( + redirectURIOOB = "urn:ietf:wg:oauth:2.0:oob" +) + +const ( + grantTypeAuthorizationCode = "authorization_code" + grantTypeRefreshToken = "refresh_token" + grantTypeImplicit = "implicit" + grantTypePassword = "password" + grantTypeDeviceCode = "urn:ietf:params:oauth:grant-type:device_code" + grantTypeTokenExchange = "urn:ietf:params:oauth:grant-type:token-exchange" +) + +const ( + // https://www.rfc-editor.org/rfc/rfc8693.html#section-3 + tokenTypeAccess = "urn:ietf:params:oauth:token-type:access_token" + tokenTypeRefresh = "urn:ietf:params:oauth:token-type:refresh_token" + tokenTypeID = "urn:ietf:params:oauth:token-type:id_token" + tokenTypeSAML1 = "urn:ietf:params:oauth:token-type:saml1" + tokenTypeSAML2 = "urn:ietf:params:oauth:token-type:saml2" + tokenTypeJWT = "urn:ietf:params:oauth:token-type:jwt" +) + +const ( + responseTypeCode = "code" // "Regular" flow + responseTypeToken = "token" // Implicit flow for frontend apps. + responseTypeIDToken = "id_token" // ID Token in url fragment + responseTypeCodeToken = "code token" // "Regular" flow + Implicit flow + responseTypeCodeIDToken = "code id_token" // "Regular" flow + ID Token + responseTypeIDTokenToken = "id_token token" // ID Token + Implicit flow + responseTypeCodeIDTokenToken = "code id_token token" // "Regular" flow + ID Token + Implicit flow +) + +const ( + deviceTokenPending = "authorization_pending" + deviceTokenComplete = "complete" + deviceTokenSlowDown = "slow_down" + deviceTokenExpired = "expired_token" +) + +func parseScopes(scopes []string) connector.Scopes { + var s connector.Scopes + for _, scope := range scopes { + switch scope { + case scopeOfflineAccess: + s.OfflineAccess = true + case scopeGroups: + s.Groups = true + } + } + return s +} + +// Determine the signature algorithm for a JWT. +func signatureAlgorithm(jwk *jose.JSONWebKey) (alg jose.SignatureAlgorithm, err error) { + if jwk.Key == nil { + return alg, errors.New("no signing key") + } + switch key := jwk.Key.(type) { + case *rsa.PrivateKey: + // Because OIDC mandates that we support RS256, we always return that + // value. In the future, we might want to make this configurable on a + // per client basis. For example allowing PS256 or ECDSA variants. + // + // See https://github.com/dexidp/dex/issues/692 + return jose.RS256, nil + case *ecdsa.PrivateKey: + // We don't actually support ECDSA keys yet, but they're tested for + // in case we want to in the future. + // + // These values are prescribed depending on the ECDSA key type. We + // can't return different values. 
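+		// (Per RFC 7518: ES256 pairs with P-256, ES384 with P-384, and
+		// ES512 with P-521.)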
+ switch key.Params() { + case elliptic.P256().Params(): + return jose.ES256, nil + case elliptic.P384().Params(): + return jose.ES384, nil + case elliptic.P521().Params(): + return jose.ES512, nil + default: + return alg, errors.New("unsupported ecdsa curve") + } + default: + return alg, fmt.Errorf("unsupported signing key type %T", key) + } +} + +func signPayload(key *jose.JSONWebKey, alg jose.SignatureAlgorithm, payload []byte) (jws string, err error) { + signingKey := jose.SigningKey{Key: key, Algorithm: alg} + + signer, err := jose.NewSigner(signingKey, &jose.SignerOptions{}) + if err != nil { + return "", fmt.Errorf("new signer: %v", err) + } + signature, err := signer.Sign(payload) + if err != nil { + return "", fmt.Errorf("signing payload: %v", err) + } + return signature.CompactSerialize() +} + +// The hash algorithm for the at_hash is determined by the signing +// algorithm used for the id_token. From the spec: +// +// ...the hash algorithm used is the hash algorithm used in the alg Header +// Parameter of the ID Token's JOSE Header. For instance, if the alg is RS256, +// hash the access_token value with SHA-256 +// +// https://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken +var hashForSigAlg = map[jose.SignatureAlgorithm]func() hash.Hash{ + jose.RS256: sha256.New, + jose.RS384: sha512.New384, + jose.RS512: sha512.New, + jose.ES256: sha256.New, + jose.ES384: sha512.New384, + jose.ES512: sha512.New, +} + +// Compute an at_hash from a raw access token and a signature algorithm +// +// See: https://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken +func accessTokenHash(alg jose.SignatureAlgorithm, accessToken string) (string, error) { + newHash, ok := hashForSigAlg[alg] + if !ok { + return "", fmt.Errorf("unsupported signature algorithm: %s", alg) + } + + hashFunc := newHash() + if _, err := io.WriteString(hashFunc, accessToken); err != nil { + return "", fmt.Errorf("computing hash: %v", err) + } + sum := hashFunc.Sum(nil) + return base64.RawURLEncoding.EncodeToString(sum[:len(sum)/2]), nil +} + +type audience []string + +func (a audience) contains(aud string) bool { + for _, e := range a { + if aud == e { + return true + } + } + return false +} + +func (a audience) MarshalJSON() ([]byte, error) { + if len(a) == 1 { + return json.Marshal(a[0]) + } + return json.Marshal([]string(a)) +} + +type idTokenClaims struct { + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience audience `json:"aud"` + Expiry int64 `json:"exp"` + IssuedAt int64 `json:"iat"` + AuthorizingParty string `json:"azp,omitempty"` + Nonce string `json:"nonce,omitempty"` + + AccessTokenHash string `json:"at_hash,omitempty"` + CodeHash string `json:"c_hash,omitempty"` + + Email string `json:"email,omitempty"` + EmailVerified *bool `json:"email_verified,omitempty"` + + Groups []string `json:"groups,omitempty"` + + Name string `json:"name,omitempty"` + PreferredUsername string `json:"preferred_username,omitempty"` + + FederatedIDClaims *federatedIDClaims `json:"federated_claims,omitempty"` +} + +type federatedIDClaims struct { + ConnectorID string `json:"connector_id,omitempty"` + UserID string `json:"user_id,omitempty"` +} + +func (s *Server) newAccessToken(clientID string, claims storage.Claims, scopes []string, nonce, connID string) (accessToken string, expiry time.Time, err error) { + return s.newIDToken(clientID, claims, scopes, nonce, storage.NewID(), "", connID) +} + +func (s *Server) newIDToken(clientID string, claims storage.Claims, scopes []string, nonce, accessToken, 
code, connID string) (idToken string, expiry time.Time, err error) {
+	keys, err := s.storage.GetKeys()
+	if err != nil {
+		s.logger.Errorf("Failed to get keys: %v", err)
+		return "", expiry, err
+	}
+
+	signingKey := keys.SigningKey
+	if signingKey == nil {
+		return "", expiry, fmt.Errorf("no key to sign payload with")
+	}
+	signingAlg, err := signatureAlgorithm(signingKey)
+	if err != nil {
+		return "", expiry, err
+	}
+
+	issuedAt := s.now()
+	expiry = issuedAt.Add(s.idTokensValidFor)
+
+	sub := &internal.IDTokenSubject{
+		UserId: claims.UserID,
+		ConnId: connID,
+	}
+
+	subjectString, err := internal.Marshal(sub)
+	if err != nil {
+		s.logger.Errorf("failed to marshal offline session ID: %v", err)
+		return "", expiry, fmt.Errorf("failed to marshal offline session ID: %v", err)
+	}
+
+	tok := idTokenClaims{
+		Issuer:   s.issuerURL.String(),
+		Subject:  subjectString,
+		Nonce:    nonce,
+		Expiry:   expiry.Unix(),
+		IssuedAt: issuedAt.Unix(),
+	}
+
+	if accessToken != "" {
+		atHash, err := accessTokenHash(signingAlg, accessToken)
+		if err != nil {
+			s.logger.Errorf("error computing at_hash: %v", err)
+			return "", expiry, fmt.Errorf("error computing at_hash: %v", err)
+		}
+		tok.AccessTokenHash = atHash
+	}
+
+	if code != "" {
+		cHash, err := accessTokenHash(signingAlg, code)
+		if err != nil {
+			s.logger.Errorf("error computing c_hash: %v", err)
+			return "", expiry, fmt.Errorf("error computing c_hash: %v", err)
+		}
+		tok.CodeHash = cHash
+	}
+
+	for _, scope := range scopes {
+		switch {
+		case scope == scopeEmail:
+			tok.Email = claims.Email
+			tok.EmailVerified = &claims.EmailVerified
+		case scope == scopeGroups:
+			tok.Groups = claims.Groups
+		case scope == scopeProfile:
+			tok.Name = claims.Username
+			tok.PreferredUsername = claims.PreferredUsername
+		case scope == scopeFederatedID:
+			tok.FederatedIDClaims = &federatedIDClaims{
+				ConnectorID: connID,
+				UserID:      claims.UserID,
+			}
+		default:
+			peerID, ok := parseCrossClientScope(scope)
+			if !ok {
+				// Ignore unknown scopes. These are already validated during the
+				// initial auth request.
+				continue
+			}
+			isTrusted, err := s.validateCrossClientTrust(clientID, peerID)
+			if err != nil {
+				return "", expiry, err
+			}
+			if !isTrusted {
+				// TODO(ericchiang): propagate this error to the client.
+				return "", expiry, fmt.Errorf("peer (%s) does not trust client", peerID)
+			}
+			tok.Audience = append(tok.Audience, peerID)
+		}
+	}
+
+	if len(tok.Audience) == 0 {
+		// The client didn't ask for a cross client audience. Set the current
+		// client as the audience.
+		tok.Audience = audience{clientID}
+	} else {
+		// The client asked for a cross client audience. If the current client
+		// was not requested explicitly, it becomes one of the entries in
+		// Audience by default.
+		if !tok.Audience.contains(clientID) {
+			tok.Audience = append(tok.Audience, clientID)
+		}
+		// The current client becomes the authorizing party.
+		tok.AuthorizingParty = clientID
+	}
+
+	payload, err := json.Marshal(tok)
+	if err != nil {
+		return "", expiry, fmt.Errorf("could not serialize claims: %v", err)
+	}
+
+	if idToken, err = signPayload(signingKey, signingAlg, payload); err != nil {
+		return "", expiry, fmt.Errorf("failed to sign payload: %v", err)
+	}
+	return idToken, expiry, nil
+}
+
+// parseAuthorizationRequest parses the initial request from the OAuth2 client.
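+//
+// A typical request (all values illustrative) looks like:
+//
+//	GET /auth?response_type=code&client_id=example-app&state=xyz
+//	    &scope=openid%20profile&redirect_uri=https%3A%2F%2Fapp.example.com%2Fcallback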
+func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthRequest, error) { + if err := r.ParseForm(); err != nil { + return nil, newDisplayedErr(http.StatusBadRequest, "Failed to parse request.") + } + q := r.Form + redirectURI, err := url.QueryUnescape(q.Get("redirect_uri")) + if err != nil { + return nil, newDisplayedErr(http.StatusBadRequest, "No redirect_uri provided.") + } + + clientID := q.Get("client_id") + state := q.Get("state") + nonce := q.Get("nonce") + connectorID := q.Get("connector_id") + // Some clients, like the old go-oidc, provide extra whitespace. Tolerate this. + scopes := strings.Fields(q.Get("scope")) + responseTypes := strings.Fields(q.Get("response_type")) + + codeChallenge := q.Get("code_challenge") + codeChallengeMethod := q.Get("code_challenge_method") + + if codeChallengeMethod == "" { + codeChallengeMethod = codeChallengeMethodPlain + } + + client, err := s.storage.GetClient(clientID) + if err != nil { + if err == storage.ErrNotFound { + return nil, newDisplayedErr(http.StatusNotFound, "Invalid client_id (%q).", clientID) + } + s.logger.Errorf("Failed to get client: %v", err) + return nil, newDisplayedErr(http.StatusInternalServerError, "Database error.") + } + + if !validateRedirectURI(client, redirectURI) { + return nil, newDisplayedErr(http.StatusBadRequest, "Unregistered redirect_uri (%q).", redirectURI) + } + if redirectURI == deviceCallbackURI && client.Public { + redirectURI = s.issuerURL.Path + deviceCallbackURI + } + + // From here on out, we want to redirect back to the client with an error. + newRedirectedErr := func(typ, format string, a ...interface{}) *redirectedAuthErr { + return &redirectedAuthErr{state, redirectURI, typ, fmt.Sprintf(format, a...)} + } + + if connectorID != "" { + connectors, err := s.storage.ListConnectors() + if err != nil { + s.logger.Errorf("Failed to list connectors: %v", err) + return nil, newRedirectedErr(errServerError, "Unable to retrieve connectors") + } + if !validateConnectorID(connectors, connectorID) { + return nil, newRedirectedErr(errInvalidRequest, "Invalid ConnectorID") + } + } + + // dex doesn't support request parameter and must return request_not_supported error + // https://openid.net/specs/openid-connect-core-1_0.html#6.1 + if q.Get("request") != "" { + return nil, newRedirectedErr(errRequestNotSupported, "Server does not support request parameter.") + } + + if codeChallengeMethod != codeChallengeMethodS256 && codeChallengeMethod != codeChallengeMethodPlain { + description := fmt.Sprintf("Unsupported PKCE challenge method (%q).", codeChallengeMethod) + return nil, newRedirectedErr(errInvalidRequest, description) + } + + var ( + unrecognized []string + invalidScopes []string + ) + hasOpenIDScope := false + for _, scope := range scopes { + switch scope { + case scopeOpenID: + hasOpenIDScope = true + case scopeOfflineAccess, scopeEmail, scopeProfile, scopeGroups, scopeFederatedID: + default: + peerID, ok := parseCrossClientScope(scope) + if !ok { + unrecognized = append(unrecognized, scope) + continue + } + + isTrusted, err := s.validateCrossClientTrust(clientID, peerID) + if err != nil { + return nil, newRedirectedErr(errServerError, "Internal server error.") + } + if !isTrusted { + invalidScopes = append(invalidScopes, scope) + } + } + } + if !hasOpenIDScope { + return nil, newRedirectedErr(errInvalidScope, `Missing required scope(s) ["openid"].`) + } + if len(unrecognized) > 0 { + return nil, newRedirectedErr(errInvalidScope, "Unrecognized scope(s) %q", unrecognized) + } + if 
len(invalidScopes) > 0 {
+		return nil, newRedirectedErr(errInvalidScope, "Client can't request scope(s) %q", invalidScopes)
+	}
+
+	var rt struct {
+		code    bool
+		idToken bool
+		token   bool
+	}
+
+	for _, responseType := range responseTypes {
+		switch responseType {
+		case responseTypeCode:
+			rt.code = true
+		case responseTypeIDToken:
+			rt.idToken = true
+		case responseTypeToken:
+			rt.token = true
+		default:
+			return nil, newRedirectedErr(errInvalidRequest, "Invalid response type %q", responseType)
+		}
+
+		if !s.supportedResponseTypes[responseType] {
+			return nil, newRedirectedErr(errUnsupportedResponseType, "Unsupported response type %q", responseType)
+		}
+	}
+
+	if len(responseTypes) == 0 {
+		return nil, newRedirectedErr(errInvalidRequest, "No response_type provided")
+	}
+
+	if rt.token && !rt.code && !rt.idToken {
+		// "token" can't be provided on its own.
+		//
+		// https://openid.net/specs/openid-connect-core-1_0.html#Authentication
+		return nil, newRedirectedErr(errInvalidRequest, "Response type 'token' must be provided with type 'id_token' and/or 'code'")
+	}
+	if !rt.code {
+		// Either "id_token token" or "id_token" has been provided, which implies
+		// the implicit flow. The implicit flow requires a nonce value.
+		//
+		// https://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthRequest
+		if nonce == "" {
+			return nil, newRedirectedErr(errInvalidRequest, "The implicit flow requires a 'nonce' value.")
+		}
+	}
+	if rt.token {
+		if redirectURI == redirectURIOOB {
+			description := fmt.Sprintf("Cannot use response type 'token' with redirect_uri '%s'.", redirectURIOOB)
+			return nil, newRedirectedErr(errInvalidRequest, description)
+		}
+	}
+
+	return &storage.AuthRequest{
+		ID:                  storage.NewID(),
+		ClientID:            client.ID,
+		State:               state,
+		Nonce:               nonce,
+		ForceApprovalPrompt: q.Get("approval_prompt") == "force",
+		Scopes:              scopes,
+		RedirectURI:         redirectURI,
+		ResponseTypes:       responseTypes,
+		ConnectorID:         connectorID,
+		PKCE: storage.PKCE{
+			CodeChallenge:       codeChallenge,
+			CodeChallengeMethod: codeChallengeMethod,
+		},
+		HMACKey: storage.NewHMACKey(crypto.SHA256),
+	}, nil
+}
+
+func parseCrossClientScope(scope string) (peerID string, ok bool) {
+	if ok = strings.HasPrefix(scope, scopeCrossClientPrefix); ok {
+		peerID = scope[len(scopeCrossClientPrefix):]
+	}
+	return
+}
+
+func (s *Server) validateCrossClientTrust(clientID, peerID string) (trusted bool, err error) {
+	if peerID == clientID {
+		return true, nil
+	}
+	peer, err := s.storage.GetClient(peerID)
+	if err != nil {
+		if err != storage.ErrNotFound {
+			s.logger.Errorf("Failed to get client: %v", err)
+			return false, err
+		}
+		return false, nil
+	}
+	for _, id := range peer.TrustedPeers {
+		if id == clientID {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func validateRedirectURI(client storage.Client, redirectURI string) bool {
+	// Allow named RedirectURIs for both public and non-public clients.
+	// This is required to make PKCE-enabled web apps work when configured as public clients.
+	for _, uri := range client.RedirectURIs {
+		if redirectURI == uri {
+			return true
+		}
+	}
+	// For non-public clients, or when RedirectURIs is set, allow only explicitly named RedirectURIs.
+	// Otherwise, check below for the special URIs used by desktop or mobile apps.
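+	// For example, a public client with no registered RedirectURIs may use the
+	// OOB URI or a loopback address such as "http://localhost:8080/callback"
+	// (port and path illustrative), which the checks below accept.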
+ if !client.Public || len(client.RedirectURIs) > 0 { + return false + } + + if redirectURI == redirectURIOOB || redirectURI == deviceCallbackURI { + return true + } + + // verify that the host is of form "http://localhost:(port)(path)" or "http://localhost(path)" + u, err := url.Parse(redirectURI) + if err != nil { + return false + } + if u.Scheme != "http" { + return false + } + if u.Host == "localhost" { + return true + } + host, _, err := net.SplitHostPort(u.Host) + return err == nil && host == "localhost" +} + +func validateConnectorID(connectors []storage.Connector, connectorID string) bool { + for _, c := range connectors { + if c.ID == connectorID { + return true + } + } + return false +} + +// storageKeySet implements the oidc.KeySet interface backed by Dex storage +type storageKeySet struct { + storage.Storage +} + +func (s *storageKeySet) VerifySignature(_ context.Context, jwt string) (payload []byte, err error) { + jws, err := jose.ParseSigned(jwt) + if err != nil { + return nil, err + } + + keyID := "" + for _, sig := range jws.Signatures { + keyID = sig.Header.KeyID + break + } + + skeys, err := s.Storage.GetKeys() + if err != nil { + return nil, err + } + + keys := []*jose.JSONWebKey{skeys.SigningKeyPub} + for _, vk := range skeys.VerificationKeys { + keys = append(keys, vk.PublicKey) + } + + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(key); err == nil { + return payload, nil + } + } + } + + return nil, errors.New("failed to verify id token signature") +} diff --git a/vendor/github.com/dexidp/dex/server/refreshhandlers.go b/vendor/github.com/dexidp/dex/server/refreshhandlers.go new file mode 100644 index 00000000..b3918ab4 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/refreshhandlers.go @@ -0,0 +1,387 @@ +package server + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/dexidp/dex/connector" + "github.com/dexidp/dex/server/internal" + "github.com/dexidp/dex/storage" +) + +func contains(arr []string, item string) bool { + for _, itemFromArray := range arr { + if itemFromArray == item { + return true + } + } + return false +} + +type refreshError struct { + msg string + code int + desc string +} + +func (r *refreshError) Error() string { + return fmt.Sprintf("refresh token error: status %d, %q %s", r.code, r.msg, r.desc) +} + +func newInternalServerError() *refreshError { + return &refreshError{msg: errInvalidRequest, desc: "", code: http.StatusInternalServerError} +} + +func newBadRequestError(desc string) *refreshError { + return &refreshError{msg: errInvalidRequest, desc: desc, code: http.StatusBadRequest} +} + +func (s *Server) refreshTokenErrHelper(w http.ResponseWriter, err *refreshError) { + s.tokenErrHelper(w, err.msg, err.desc, err.code) +} + +func (s *Server) extractRefreshTokenFromRequest(r *http.Request) (*internal.RefreshToken, *refreshError) { + code := r.PostFormValue("refresh_token") + if code == "" { + return nil, newBadRequestError("No refresh token is found in request.") + } + + token := new(internal.RefreshToken) + if err := internal.Unmarshal(code, token); err != nil { + // For backward compatibility, assume the refresh_token is a raw refresh token ID + // if it fails to decode. + // + // Because refresh_token values that aren't unmarshable were generated by servers + // that don't have a Token value, we'll still reject any attempts to claim a + // refresh_token twice. 
+		token = &internal.RefreshToken{RefreshId: code, Token: ""}
+	}
+
+	return token, nil
+}
+
+type refreshContext struct {
+	storageToken *storage.RefreshToken
+	requestToken *internal.RefreshToken
+
+	connector     Connector
+	connectorData []byte
+
+	scopes []string
+}
+
+// getRefreshTokenFromStorage checks that the refresh token is valid and exists in the storage, and gets its info.
+func (s *Server) getRefreshTokenFromStorage(clientID string, token *internal.RefreshToken) (*refreshContext, *refreshError) {
+	refreshCtx := refreshContext{requestToken: token}
+
+	invalidErr := newBadRequestError("Refresh token is invalid or has already been claimed by another client.")
+
+	// Get RefreshToken
+	refresh, err := s.storage.GetRefresh(token.RefreshId)
+	if err != nil {
+		if err != storage.ErrNotFound {
+			s.logger.Errorf("failed to get refresh token: %v", err)
+			return nil, newInternalServerError()
+		}
+		return nil, invalidErr
+	}
+
+	if refresh.ClientID != clientID {
+		s.logger.Errorf("client %s trying to claim token for client %s", clientID, refresh.ClientID)
+		// According to https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 Dex should respond with an
+		// invalid grant error if the token has already been claimed by another client.
+		return nil, &refreshError{msg: errInvalidGrant, desc: invalidErr.desc, code: http.StatusBadRequest}
+	}
+
+	if refresh.Token != token.Token {
+		switch {
+		case !s.refreshTokenPolicy.AllowedToReuse(refresh.LastUsed):
+			fallthrough
+		case refresh.ObsoleteToken != token.Token:
+			fallthrough
+		case refresh.ObsoleteToken == "":
+			s.logger.Errorf("refresh token with id %s claimed twice", refresh.ID)
+			return nil, invalidErr
+		}
+	}
+
+	expiredErr := newBadRequestError("Refresh token expired.")
+	if s.refreshTokenPolicy.CompletelyExpired(refresh.CreatedAt) {
+		s.logger.Errorf("refresh token with id %s expired", refresh.ID)
+		return nil, expiredErr
+	}
+
+	if s.refreshTokenPolicy.ExpiredBecauseUnused(refresh.LastUsed) {
+		s.logger.Errorf("refresh token with id %s expired due to inactivity", refresh.ID)
+		return nil, expiredErr
+	}
+
+	refreshCtx.storageToken = &refresh
+
+	// Get Connector
+	refreshCtx.connector, err = s.getConnector(refresh.ConnectorID)
+	if err != nil {
+		s.logger.Errorf("connector with ID %q not found: %v", refresh.ConnectorID, err)
+		return nil, newInternalServerError()
+	}
+
+	// Get Connector Data
+	session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID)
+	switch {
+	case err != nil:
+		if err != storage.ErrNotFound {
+			s.logger.Errorf("failed to get offline session: %v", err)
+			return nil, newInternalServerError()
+		}
+	case len(refresh.ConnectorData) > 0:
+		// Use the old connector data if it exists; it should be deleted once used.
+		refreshCtx.connectorData = refresh.ConnectorData
+	default:
+		refreshCtx.connectorData = session.ConnectorData
+	}
+
+	return &refreshCtx, nil
+}
+
+func (s *Server) getRefreshScopes(r *http.Request, refresh *storage.RefreshToken) ([]string, *refreshError) {
+	// Per the OAuth2 spec, if the client has omitted the scopes, default to the original
+	// authorized scopes.
+	//
+	// https://tools.ietf.org/html/rfc6749#section-6
+	scope := r.PostFormValue("scope")
+
+	if scope == "" {
+		return refresh.Scopes, nil
+	}
+
+	requestedScopes := strings.Fields(scope)
+	var unauthorizedScopes []string
+
+	for _, requestScope := range requestedScopes {
+		if !contains(refresh.Scopes, requestScope) {
+			unauthorizedScopes = append(unauthorizedScopes, requestScope)
+		}
+	}
+
+	if len(unauthorizedScopes) > 0 {
+		desc := fmt.Sprintf("Requested scopes contain unauthorized scope(s): %q.", unauthorizedScopes)
+		return nil, newBadRequestError(desc)
+	}
+
+	return requestedScopes, nil
+}
+
+func (s *Server) refreshWithConnector(ctx context.Context, rCtx *refreshContext, ident connector.Identity) (connector.Identity, *refreshError) {
+	// Can the connector refresh the identity? If so, attempt to refresh the data
+	// in the connector.
+	//
+	// TODO(ericchiang): We may want a strict mode where connectors that don't implement
+	// this interface can't perform refreshing.
+	if refreshConn, ok := rCtx.connector.Connector.(connector.RefreshConnector); ok {
+		// Set connector data to the one received from an offline session
+		ident.ConnectorData = rCtx.connectorData
+		s.logger.Debugf("connector data before refresh: %s", ident.ConnectorData)
+
+		newIdent, err := refreshConn.Refresh(ctx, parseScopes(rCtx.scopes), ident)
+		if err != nil {
+			s.logger.Errorf("failed to refresh identity: %v", err)
+			return ident, newInternalServerError()
+		}
+
+		return newIdent, nil
+	}
+	return ident, nil
+}
+
+// updateOfflineSession updates the offline session in the storage.
+func (s *Server) updateOfflineSession(refresh *storage.RefreshToken, ident connector.Identity, lastUsed time.Time) *refreshError {
+	offlineSessionUpdater := func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
+		if old.Refresh[refresh.ClientID].ID != refresh.ID {
+			return old, errors.New("refresh token invalid")
+		}
+
+		old.Refresh[refresh.ClientID].LastUsed = lastUsed
+		if len(ident.ConnectorData) > 0 {
+			old.ConnectorData = ident.ConnectorData
+		}
+
+		s.logger.Debugf("saved connector data: %s %s", ident.UserID, ident.ConnectorData)
+
+		return old, nil
+	}
+
+	// Update the LastUsed timestamp in the refresh token reference object
+	// in the offline session for the user.
+	err := s.storage.UpdateOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID, offlineSessionUpdater)
+	if err != nil {
+		s.logger.Errorf("failed to update offline session: %v", err)
+		return newInternalServerError()
+	}
+
+	return nil
+}
+
+// updateRefreshToken updates the refresh token and offline session in the storage.
+func (s *Server) updateRefreshToken(ctx context.Context, rCtx *refreshContext) (*internal.RefreshToken, connector.Identity, *refreshError) {
+	var rerr *refreshError
+
+	newToken := &internal.RefreshToken{
+		Token:     rCtx.requestToken.Token,
+		RefreshId: rCtx.requestToken.RefreshId,
+	}
+
+	lastUsed := s.now()
+
+	ident := connector.Identity{
+		UserID:            rCtx.storageToken.Claims.UserID,
+		Username:          rCtx.storageToken.Claims.Username,
+		PreferredUsername: rCtx.storageToken.Claims.PreferredUsername,
+		Email:             rCtx.storageToken.Claims.Email,
+		EmailVerified:     rCtx.storageToken.Claims.EmailVerified,
+		Groups:            rCtx.storageToken.Claims.Groups,
+	}
+
+	refreshTokenUpdater := func(old storage.RefreshToken) (storage.RefreshToken, error) {
+		rotationEnabled := s.refreshTokenPolicy.RotationEnabled()
+		reusingAllowed := s.refreshTokenPolicy.AllowedToReuse(old.LastUsed)
+
+		switch {
+		case !rotationEnabled && reusingAllowed:
+			// If rotation is disabled and the offline session was updated recently, skip further actions.
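+			// The token itself is reused unchanged; only the connector data is
+			// dropped, since it now lives in the offline session.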
+			old.ConnectorData = nil
+			return old, nil
+
+		case rotationEnabled && reusingAllowed:
+			if old.Token != rCtx.requestToken.Token && old.ObsoleteToken != rCtx.requestToken.Token {
+				return old, errors.New("refresh token claimed twice")
+			}
+
+			// Return the previously generated token for all requests carrying an obsolete token.
+			if old.ObsoleteToken == rCtx.requestToken.Token {
+				newToken.Token = old.Token
+			}
+
+			// Do not update the last-used time for the offline session if the token is allowed to be reused.
+			lastUsed = old.LastUsed
+			old.ConnectorData = nil
+			return old, nil
+
+		case rotationEnabled && !reusingAllowed:
+			if old.Token != rCtx.requestToken.Token {
+				return old, errors.New("refresh token claimed twice")
+			}
+
+			// Issue a new refresh token.
+			old.ObsoleteToken = old.Token
+			newToken.Token = storage.NewID()
+		}
+
+		old.Token = newToken.Token
+		old.LastUsed = lastUsed
+
+		// ConnectorData has been moved to OfflineSession
+		old.ConnectorData = nil
+
+		// The connector's Refresh method is called at most once for requests
+		// outside the reuse interval. This avoids multiple calls to the
+		// external IdP for concurrent requests.
+		ident, rerr = s.refreshWithConnector(ctx, rCtx, ident)
+		if rerr != nil {
+			return old, rerr
+		}
+
+		// Update the claims of the refresh token.
+		//
+		// UserID intentionally ignored for now.
+		old.Claims.Username = ident.Username
+		old.Claims.PreferredUsername = ident.PreferredUsername
+		old.Claims.Email = ident.Email
+		old.Claims.EmailVerified = ident.EmailVerified
+		old.Claims.Groups = ident.Groups
+
+		return old, nil
+	}
+
+	// Update the refresh token in the storage.
+	err := s.storage.UpdateRefreshToken(rCtx.storageToken.ID, refreshTokenUpdater)
+	if err != nil {
+		s.logger.Errorf("failed to update refresh token: %v", err)
+		return nil, ident, newInternalServerError()
+	}
+
+	rerr = s.updateOfflineSession(rCtx.storageToken, ident, lastUsed)
+	if rerr != nil {
+		return nil, ident, rerr
+	}
+
+	return newToken, ident, nil
+}
+
+// handleRefreshToken handles a refresh token request (https://tools.ietf.org/html/rfc6749#section-6).
+// It is the entrypoint for refresh token handling.
+func (s *Server) handleRefreshToken(w http.ResponseWriter, r *http.Request, client storage.Client) {
+	token, rerr := s.extractRefreshTokenFromRequest(r)
+	if rerr != nil {
+		s.refreshTokenErrHelper(w, rerr)
+		return
+	}
+
+	rCtx, rerr := s.getRefreshTokenFromStorage(client.ID, token)
+	if rerr != nil {
+		s.refreshTokenErrHelper(w, rerr)
+		return
+	}
+
+	rCtx.scopes, rerr = s.getRefreshScopes(r, rCtx.storageToken)
+	if rerr != nil {
+		s.refreshTokenErrHelper(w, rerr)
+		return
+	}
+
+	newToken, ident, rerr := s.updateRefreshToken(r.Context(), rCtx)
+	if rerr != nil {
+		s.refreshTokenErrHelper(w, rerr)
+		return
+	}
+
+	claims := storage.Claims{
+		UserID:            ident.UserID,
+		Username:          ident.Username,
+		PreferredUsername: ident.PreferredUsername,
+		Email:             ident.Email,
+		EmailVerified:     ident.EmailVerified,
+		Groups:            ident.Groups,
+	}
+
+	accessToken, _, err := s.newAccessToken(client.ID, claims, rCtx.scopes, rCtx.storageToken.Nonce, rCtx.storageToken.ConnectorID)
+	if err != nil {
+		s.logger.Errorf("failed to create new access token: %v", err)
+		s.refreshTokenErrHelper(w, newInternalServerError())
+		return
+	}
+
+	idToken, expiry, err := s.newIDToken(client.ID, claims, rCtx.scopes, rCtx.storageToken.Nonce, accessToken, "", rCtx.storageToken.ConnectorID)
+	if err != nil {
+		s.logger.Errorf("failed to create ID token: %v", err)
+		s.refreshTokenErrHelper(w, newInternalServerError())
+		return
+	}
+
+	rawNewToken, err := internal.Marshal(newToken)
+	if err != nil {
+		s.logger.Errorf("failed to marshal refresh token: %v", err)
+		s.refreshTokenErrHelper(w, newInternalServerError())
+		return
+	}
+
+	resp := s.toAccessTokenResponse(idToken, accessToken, rawNewToken, expiry)
+	s.writeAccessToken(w, resp)
+}
diff --git a/vendor/github.com/dexidp/dex/server/rotation.go b/vendor/github.com/dexidp/dex/server/rotation.go
new file mode 100644
index 00000000..98489767
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/server/rotation.go
@@ -0,0 +1,249 @@
+package server
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	"gopkg.in/square/go-jose.v2"
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+)
+
+var errAlreadyRotated = errors.New("keys already rotated by another server instance")
+
+// rotationStrategy describes a strategy for generating cryptographic keys, how
+// often to rotate them, and how long they can validate signatures after rotation.
+type rotationStrategy struct {
+	// Time between rotations.
+	rotationFrequency time.Duration
+
+	// After being rotated how long should the key be kept around for validating
+	// signatures?
+	idTokenValidFor time.Duration
+
+	// Keys are always RSA keys. Though cryptopasta recommends ECDSA keys, not every
+	// client may support these (e.g. github.com/coreos/go-oidc/oidc).
+	key func() (*rsa.PrivateKey, error)
+}
+
+// staticRotationStrategy returns a strategy which never rotates keys.
+func staticRotationStrategy(key *rsa.PrivateKey) rotationStrategy {
+	return rotationStrategy{
+		// Setting these values to 100 years is easier than having a flag indicating no rotation.
+		rotationFrequency: time.Hour * 8760 * 100,
+		idTokenValidFor:   time.Hour * 8760 * 100,
+		key:               func() (*rsa.PrivateKey, error) { return key, nil },
+	}
+}
+
+// defaultRotationStrategy returns a strategy which rotates keys every provided period,
+// holding onto the public parts for some specified amount of time.
+func defaultRotationStrategy(rotationFrequency, idTokenValidFor time.Duration) rotationStrategy {
+	return rotationStrategy{
+		rotationFrequency: rotationFrequency,
+		idTokenValidFor:   idTokenValidFor,
+		key: func() (*rsa.PrivateKey, error) {
+			return rsa.GenerateKey(rand.Reader, 2048)
+		},
+	}
+}
+
+type keyRotator struct {
+	storage.Storage
+
+	strategy rotationStrategy
+	now      func() time.Time
+
+	logger log.Logger
+}
+
+// startKeyRotation begins key rotation in a new goroutine, closing once the context is canceled.
+//
+// The method blocks until after the first attempt to rotate keys has completed. That way
+// healthy storages will return from this call with valid keys.
+func (s *Server) startKeyRotation(ctx context.Context, strategy rotationStrategy, now func() time.Time) {
+	rotator := keyRotator{s.storage, strategy, now, s.logger}
+
+	// Try to rotate immediately so properly configured storages will have keys.
+ if err := rotator.rotate(); err != nil { + if err == errAlreadyRotated { + s.logger.Infof("Key rotation not needed: %v", err) + } else { + s.logger.Errorf("failed to rotate keys: %v", err) + } + } + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Second * 30): + if err := rotator.rotate(); err != nil { + s.logger.Errorf("failed to rotate keys: %v", err) + } + } + } + }() +} + +func (k keyRotator) rotate() error { + keys, err := k.GetKeys() + if err != nil && err != storage.ErrNotFound { + return fmt.Errorf("get keys: %v", err) + } + if k.now().Before(keys.NextRotation) { + return nil + } + k.logger.Infof("keys expired, rotating") + + // Generate the key outside of a storage transaction. + key, err := k.strategy.key() + if err != nil { + return fmt.Errorf("generate key: %v", err) + } + b := make([]byte, 20) + if _, err := io.ReadFull(rand.Reader, b); err != nil { + panic(err) + } + keyID := hex.EncodeToString(b) + priv := &jose.JSONWebKey{ + Key: key, + KeyID: keyID, + Algorithm: "RS256", + Use: "sig", + } + pub := &jose.JSONWebKey{ + Key: key.Public(), + KeyID: keyID, + Algorithm: "RS256", + Use: "sig", + } + + var nextRotation time.Time + err = k.Storage.UpdateKeys(func(keys storage.Keys) (storage.Keys, error) { + tNow := k.now() + + // if you are running multiple instances of dex, another instance + // could have already rotated the keys. + if tNow.Before(keys.NextRotation) { + return storage.Keys{}, errAlreadyRotated + } + + expired := func(key storage.VerificationKey) bool { + return tNow.After(key.Expiry) + } + + // Remove any verification keys that have expired. + i := 0 + for _, key := range keys.VerificationKeys { + if !expired(key) { + keys.VerificationKeys[i] = key + i++ + } + } + keys.VerificationKeys = keys.VerificationKeys[:i] + + if keys.SigningKeyPub != nil { + // Move current signing key to a verification only key, throwing + // away the private part. + verificationKey := storage.VerificationKey{ + PublicKey: keys.SigningKeyPub, + // After demoting the signing key, keep the token around for at least + // the amount of time an ID Token is valid for. This ensures the + // verification key won't expire until all ID Tokens it's signed + // expired as well. 
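+				// For example, with idTokenValidFor set to 24h, a key demoted at
+				// time T can still be used to verify signatures until T+24h.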
+				Expiry: tNow.Add(k.strategy.idTokenValidFor),
+			}
+			keys.VerificationKeys = append(keys.VerificationKeys, verificationKey)
+		}
+
+		nextRotation = k.now().Add(k.strategy.rotationFrequency)
+		keys.SigningKey = priv
+		keys.SigningKeyPub = pub
+		keys.NextRotation = nextRotation
+		return keys, nil
+	})
+	if err != nil {
+		return err
+	}
+	k.logger.Infof("keys rotated, next rotation: %s", nextRotation)
+	return nil
+}
+
+type RefreshTokenPolicy struct {
+	rotateRefreshTokens bool // enable rotation
+
+	absoluteLifetime  time.Duration // interval from token creation to the end of its life
+	validIfNotUsedFor time.Duration // interval from last token update to the end of its life
+	reuseInterval     time.Duration // interval within which the old refresh token is allowed to be reused
+
+	now func() time.Time
+
+	logger log.Logger
+}
+
+// NewRefreshTokenPolicy parses the given duration strings and builds a policy.
+// Note that the rotation argument is treated as a "disable rotation" flag,
+// hence the negation when it is assigned below.
+func NewRefreshTokenPolicy(logger log.Logger, rotation bool, validIfNotUsedFor, absoluteLifetime, reuseInterval string) (*RefreshTokenPolicy, error) {
+	r := RefreshTokenPolicy{now: time.Now, logger: logger}
+	var err error
+
+	if validIfNotUsedFor != "" {
+		r.validIfNotUsedFor, err = time.ParseDuration(validIfNotUsedFor)
+		if err != nil {
+			return nil, fmt.Errorf("invalid config value %q for refresh token valid if not used for: %v", validIfNotUsedFor, err)
+		}
+		logger.Infof("config refresh tokens valid if not used for: %v", validIfNotUsedFor)
+	}
+
+	if absoluteLifetime != "" {
+		r.absoluteLifetime, err = time.ParseDuration(absoluteLifetime)
+		if err != nil {
+			return nil, fmt.Errorf("invalid config value %q for refresh tokens absolute lifetime: %v", absoluteLifetime, err)
+		}
+		logger.Infof("config refresh tokens absolute lifetime: %v", absoluteLifetime)
+	}
+
+	if reuseInterval != "" {
+		r.reuseInterval, err = time.ParseDuration(reuseInterval)
+		if err != nil {
+			return nil, fmt.Errorf("invalid config value %q for refresh tokens reuse interval: %v", reuseInterval, err)
+		}
+		logger.Infof("config refresh tokens reuse interval: %v", reuseInterval)
+	}
+
+	r.rotateRefreshTokens = !rotation
+	logger.Infof("config refresh tokens rotation enabled: %v", r.rotateRefreshTokens)
+	return &r, nil
+}
+
+func (r *RefreshTokenPolicy) RotationEnabled() bool {
+	return r.rotateRefreshTokens
+}
+
+func (r *RefreshTokenPolicy) CompletelyExpired(lastUsed time.Time) bool {
+	if r.absoluteLifetime == 0 {
+		return false // expiration disabled
+	}
+	return r.now().After(lastUsed.Add(r.absoluteLifetime))
+}
+
+func (r *RefreshTokenPolicy) ExpiredBecauseUnused(lastUsed time.Time) bool {
+	if r.validIfNotUsedFor == 0 {
+		return false // expiration disabled
+	}
+	return r.now().After(lastUsed.Add(r.validIfNotUsedFor))
+}
+
+func (r *RefreshTokenPolicy) AllowedToReuse(lastUsed time.Time) bool {
+	if r.reuseInterval == 0 {
+		return false // reuse disabled
+	}
+	return !r.now().After(lastUsed.Add(r.reuseInterval))
+}
diff --git a/vendor/github.com/dexidp/dex/server/server.go b/vendor/github.com/dexidp/dex/server/server.go
new file mode 100644
index 00000000..bf83dd81
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/server/server.go
@@ -0,0 +1,657 @@
+package server
+
+import (
+	"context"
+	"crypto/rsa"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/fs"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	gosundheit "github.com/AppsFlyer/go-sundheit"
+	"github.com/felixge/httpsnoop"
+	"github.com/gorilla/handlers"
+	"github.com/gorilla/mux"
+	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/crypto/bcrypt"
+
"github.com/dexidp/dex/connector" + "github.com/dexidp/dex/connector/atlassiancrowd" + "github.com/dexidp/dex/connector/authproxy" + "github.com/dexidp/dex/connector/bitbucketcloud" + "github.com/dexidp/dex/connector/gitea" + "github.com/dexidp/dex/connector/github" + "github.com/dexidp/dex/connector/gitlab" + "github.com/dexidp/dex/connector/google" + "github.com/dexidp/dex/connector/keystone" + "github.com/dexidp/dex/connector/ldap" + "github.com/dexidp/dex/connector/linkedin" + "github.com/dexidp/dex/connector/microsoft" + "github.com/dexidp/dex/connector/mock" + "github.com/dexidp/dex/connector/oauth" + "github.com/dexidp/dex/connector/oidc" + "github.com/dexidp/dex/connector/openshift" + "github.com/dexidp/dex/connector/saml" + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/web" +) + +// LocalConnector is the local passwordDB connector which is an internal +// connector maintained by the server. +const LocalConnector = "local" + +// Connector is a connector with resource version metadata. +type Connector struct { + ResourceVersion string + Connector connector.Connector +} + +// Config holds the server's configuration options. +// +// Multiple servers using the same storage are expected to be configured identically. +type Config struct { + Issuer string + + // The backing persistence layer. + Storage storage.Storage + + AllowedGrantTypes []string + + // Valid values are "code" to enable the code flow and "token" to enable the implicit + // flow. If no response types are supplied this value defaults to "code". + SupportedResponseTypes []string + + // List of allowed origins for CORS requests on discovery, token and keys endpoint. + // If none are indicated, CORS requests are disabled. Passing in "*" will allow any + // domain. + AllowedOrigins []string + + // If enabled, the server won't prompt the user to approve authorization requests. + // Logging in implies approval. + SkipApprovalScreen bool + + // If enabled, the connectors selection page will always be shown even if there's only one + AlwaysShowLoginScreen bool + + RotateKeysAfter time.Duration // Defaults to 6 hours. + IDTokensValidFor time.Duration // Defaults to 24 hours + AuthRequestsValidFor time.Duration // Defaults to 24 hours + DeviceRequestsValidFor time.Duration // Defaults to 5 minutes + + // Refresh token expiration settings + RefreshTokenPolicy *RefreshTokenPolicy + + // If set, the server will use this connector to handle password grants + PasswordConnector string + + GCFrequency time.Duration // Defaults to 5 minutes + + // If specified, the server will use this function for determining time. + Now func() time.Time + + Web WebConfig + + Logger log.Logger + + PrometheusRegistry *prometheus.Registry + + HealthChecker gosundheit.Health +} + +// WebConfig holds the server's frontend templates and asset configuration. +type WebConfig struct { + // A file path to static web assets. + // + // It is expected to contain the following directories: + // + // * static - Static static served at "( issuer URL )/static". + // * templates - HTML templates controlled by dex. + // * themes/(theme) - Static static served at "( issuer URL )/theme". + Dir string + + // Alternative way to programmatically configure static web assets. + // If Dir is specified, WebFS is ignored. + // It's expected to contain the same files and directories as mentioned above. + // + // Note: this is experimental. Might get removed without notice! 
+ WebFS fs.FS + + // Defaults to "( issuer URL )/theme/logo.png" + LogoURL string + + // Defaults to "dex" + Issuer string + + // Defaults to "light" + Theme string + + // Map of extra values passed into the templates + Extra map[string]string +} + +func value(val, defaultValue time.Duration) time.Duration { + if val == 0 { + return defaultValue + } + return val +} + +// Server is the top level object. +type Server struct { + issuerURL url.URL + + // mutex for the connectors map. + mu sync.Mutex + // Map of connector IDs to connectors. + connectors map[string]Connector + + storage storage.Storage + + mux http.Handler + + templates *templates + + // If enabled, don't prompt user for approval after logging in through connector. + skipApproval bool + + // If enabled, show the connector selection screen even if there's only one + alwaysShowLogin bool + + // Used for password grant + passwordConnector string + + supportedResponseTypes map[string]bool + + supportedGrantTypes []string + + now func() time.Time + + idTokensValidFor time.Duration + authRequestsValidFor time.Duration + deviceRequestsValidFor time.Duration + + refreshTokenPolicy *RefreshTokenPolicy + + logger log.Logger +} + +// NewServer constructs a server from the provided config. +func NewServer(ctx context.Context, c Config) (*Server, error) { + return newServer(ctx, c, defaultRotationStrategy( + value(c.RotateKeysAfter, 6*time.Hour), + value(c.IDTokensValidFor, 24*time.Hour), + )) +} + +// NewServerWithKey constructs a server from the provided config and a static signing key. +func NewServerWithKey(ctx context.Context, c Config, privateKey *rsa.PrivateKey) (*Server, error) { + return newServer(ctx, c, staticRotationStrategy( + privateKey, + )) +} + +func newServer(ctx context.Context, c Config, rotationStrategy rotationStrategy) (*Server, error) { + issuerURL, err := url.Parse(c.Issuer) + if err != nil { + return nil, fmt.Errorf("server: can't parse issuer URL") + } + + if c.Storage == nil { + return nil, errors.New("server: storage cannot be nil") + } + + if len(c.SupportedResponseTypes) == 0 { + c.SupportedResponseTypes = []string{responseTypeCode} + } + + allSupportedGrants := map[string]bool{ + grantTypeAuthorizationCode: true, + grantTypeRefreshToken: true, + grantTypeDeviceCode: true, + grantTypeTokenExchange: true, + } + supportedRes := make(map[string]bool) + + for _, respType := range c.SupportedResponseTypes { + switch respType { + case responseTypeCode, responseTypeIDToken, responseTypeCodeIDToken: + // continue + case responseTypeToken, responseTypeCodeToken, responseTypeIDTokenToken, responseTypeCodeIDTokenToken: + // response_type=token is an implicit flow, let's add it to the discovery info + // https://datatracker.ietf.org/doc/html/rfc6749#section-4.2.1 + allSupportedGrants[grantTypeImplicit] = true + default: + return nil, fmt.Errorf("unsupported response_type %q", respType) + } + supportedRes[respType] = true + } + + if c.PasswordConnector != "" { + allSupportedGrants[grantTypePassword] = true + } + + var supportedGrants []string + if len(c.AllowedGrantTypes) > 0 { + for _, grant := range c.AllowedGrantTypes { + if allSupportedGrants[grant] { + supportedGrants = append(supportedGrants, grant) + } + } + } else { + for grant := range allSupportedGrants { + supportedGrants = append(supportedGrants, grant) + } + } + sort.Strings(supportedGrants) + + webFS := web.FS() + if c.Web.Dir != "" { + webFS = os.DirFS(c.Web.Dir) + } else if c.Web.WebFS != nil { + webFS = c.Web.WebFS + } + + web := webConfig{ + webFS: webFS, 
+ logoURL: c.Web.LogoURL, + issuerURL: c.Issuer, + issuer: c.Web.Issuer, + theme: c.Web.Theme, + extra: c.Web.Extra, + } + + static, theme, robots, tmpls, err := loadWebConfig(web) + if err != nil { + return nil, fmt.Errorf("server: failed to load web static: %v", err) + } + + now := c.Now + if now == nil { + now = time.Now + } + + s := &Server{ + issuerURL: *issuerURL, + connectors: make(map[string]Connector), + storage: newKeyCacher(c.Storage, now), + supportedResponseTypes: supportedRes, + supportedGrantTypes: supportedGrants, + idTokensValidFor: value(c.IDTokensValidFor, 24*time.Hour), + authRequestsValidFor: value(c.AuthRequestsValidFor, 24*time.Hour), + deviceRequestsValidFor: value(c.DeviceRequestsValidFor, 5*time.Minute), + refreshTokenPolicy: c.RefreshTokenPolicy, + skipApproval: c.SkipApprovalScreen, + alwaysShowLogin: c.AlwaysShowLoginScreen, + now: now, + templates: tmpls, + passwordConnector: c.PasswordConnector, + logger: c.Logger, + } + + // Retrieves connector objects in backend storage. This list includes the static connectors + // defined in the ConfigMap and dynamic connectors retrieved from the storage. + storageConnectors, err := c.Storage.ListConnectors() + if err != nil { + return nil, fmt.Errorf("server: failed to list connector objects from storage: %v", err) + } + + if len(storageConnectors) == 0 && len(s.connectors) == 0 { + return nil, errors.New("server: no connectors specified") + } + + for _, conn := range storageConnectors { + if _, err := s.OpenConnector(conn); err != nil { + return nil, fmt.Errorf("server: Failed to open connector %s: %v", conn.ID, err) + } + } + + instrumentHandlerCounter := func(_ string, handler http.Handler) http.HandlerFunc { + return handler.ServeHTTP + } + + if c.PrometheusRegistry != nil { + requestCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "Count of all HTTP requests.", + }, []string{"handler", "code", "method"}) + + err = c.PrometheusRegistry.Register(requestCounter) + if err != nil { + return nil, fmt.Errorf("server: Failed to register Prometheus HTTP metrics: %v", err) + } + + instrumentHandlerCounter = func(handlerName string, handler http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(handler, w, r) + requestCounter.With(prometheus.Labels{"handler": handlerName, "code": strconv.Itoa(m.Code), "method": r.Method}).Inc() + } + } + } + + r := mux.NewRouter().SkipClean(true).UseEncodedPath() + handle := func(p string, h http.Handler) { + r.Handle(path.Join(issuerURL.Path, p), instrumentHandlerCounter(p, h)) + } + handleFunc := func(p string, h http.HandlerFunc) { + handle(p, h) + } + handlePrefix := func(p string, h http.Handler) { + prefix := path.Join(issuerURL.Path, p) + r.PathPrefix(prefix).Handler(http.StripPrefix(prefix, h)) + } + handleWithCORS := func(p string, h http.HandlerFunc) { + var handler http.Handler = h + if len(c.AllowedOrigins) > 0 { + allowedHeaders := []string{ + "Authorization", + } + cors := handlers.CORS( + handlers.AllowedOrigins(c.AllowedOrigins), + handlers.AllowedHeaders(allowedHeaders), + ) + handler = cors(handler) + } + r.Handle(path.Join(issuerURL.Path, p), instrumentHandlerCounter(p, handler)) + } + r.NotFoundHandler = http.NotFoundHandler() + + discoveryHandler, err := s.discoveryHandler() + if err != nil { + return nil, err + } + handleWithCORS("/.well-known/openid-configuration", discoveryHandler) + + // TODO(ericchiang): rate limit certain paths based on IP. 
+ handleWithCORS("/token", s.handleToken) + handleWithCORS("/keys", s.handlePublicKeys) + handleWithCORS("/userinfo", s.handleUserInfo) + handleFunc("/auth", s.handleAuthorization) + handleFunc("/auth/{connector}", s.handleConnectorLogin) + handleFunc("/auth/{connector}/login", s.handlePasswordLogin) + handleFunc("/device", s.handleDeviceExchange) + handleFunc("/device/auth/verify_code", s.verifyUserCode) + handleFunc("/device/code", s.handleDeviceCode) + // TODO(nabokihms): "/device/token" endpoint is deprecated, consider using /token endpoint instead + handleFunc("/device/token", s.handleDeviceTokenDeprecated) + handleFunc(deviceCallbackURI, s.handleDeviceCallback) + r.HandleFunc(path.Join(issuerURL.Path, "/callback"), func(w http.ResponseWriter, r *http.Request) { + // Strip the X-Remote-* headers to prevent security issues on + // misconfigured authproxy connector setups. + for key := range r.Header { + if strings.HasPrefix(strings.ToLower(key), "x-remote-") { + r.Header.Del(key) + } + } + s.handleConnectorCallback(w, r) + }) + // For easier connector-specific web server configuration, e.g. for the + // "authproxy" connector. + handleFunc("/callback/{connector}", s.handleConnectorCallback) + handleFunc("/approval", s.handleApproval) + handle("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !c.HealthChecker.IsHealthy() { + s.renderError(r, w, http.StatusInternalServerError, "Health check failed.") + return + } + fmt.Fprintf(w, "Health check passed") + })) + + handlePrefix("/static", static) + handlePrefix("/theme", theme) + handleFunc("/robots.txt", robots) + + s.mux = r + + s.startKeyRotation(ctx, rotationStrategy, now) + s.startGarbageCollection(ctx, value(c.GCFrequency, 5*time.Minute), now) + + return s, nil +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.mux.ServeHTTP(w, r) +} + +func (s *Server) absPath(pathItems ...string) string { + paths := make([]string, len(pathItems)+1) + paths[0] = s.issuerURL.Path + copy(paths[1:], pathItems) + return path.Join(paths...) +} + +func (s *Server) absURL(pathItems ...string) string { + u := s.issuerURL + u.Path = s.absPath(pathItems...) + return u.String() +} + +func newPasswordDB(s storage.Storage) interface { + connector.Connector + connector.PasswordConnector +} { + return passwordDB{s} +} + +type passwordDB struct { + s storage.Storage +} + +func (db passwordDB) Login(ctx context.Context, s connector.Scopes, email, password string) (connector.Identity, bool, error) { + p, err := db.s.GetPassword(email) + if err != nil { + if err != storage.ErrNotFound { + return connector.Identity{}, false, fmt.Errorf("get password: %v", err) + } + return connector.Identity{}, false, nil + } + // This check prevents dex users from logging in using static passwords + // configured with hash costs that are too high or low. + if err := checkCost(p.Hash); err != nil { + return connector.Identity{}, false, err + } + if err := bcrypt.CompareHashAndPassword(p.Hash, []byte(password)); err != nil { + return connector.Identity{}, false, nil + } + return connector.Identity{ + UserID: p.UserID, + Username: p.Username, + Email: p.Email, + EmailVerified: true, + }, true, nil +} + +func (db passwordDB) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { + // If the user has been deleted, the refresh token will be rejected. 
+	p, err := db.s.GetPassword(identity.Email)
+	if err != nil {
+		if err == storage.ErrNotFound {
+			return connector.Identity{}, errors.New("user not found")
+		}
+		return connector.Identity{}, fmt.Errorf("get password: %v", err)
+	}
+
+	// User removed but a new user with the same email exists.
+	if p.UserID != identity.UserID {
+		return connector.Identity{}, errors.New("user not found")
+	}
+
+	// If a user has updated their username, that will be reflected in the
+	// refreshed token.
+	//
+	// No other fields are expected to be refreshable as email is effectively used
+	// as an ID and this implementation doesn't deal with groups.
+	identity.Username = p.Username
+
+	return identity, nil
+}
+
+func (db passwordDB) Prompt() string {
+	return "Email Address"
+}
+
+// newKeyCacher returns a storage which caches keys so long as the next
+// rotation time is in the future.
+func newKeyCacher(s storage.Storage, now func() time.Time) storage.Storage {
+	if now == nil {
+		now = time.Now
+	}
+	return &keyCacher{Storage: s, now: now}
+}
+
+type keyCacher struct {
+	storage.Storage
+
+	now  func() time.Time
+	keys atomic.Value // Always holds nil or type *storage.Keys.
+}
+
+func (k *keyCacher) GetKeys() (storage.Keys, error) {
+	keys, ok := k.keys.Load().(*storage.Keys)
+	if ok && keys != nil && k.now().Before(keys.NextRotation) {
+		return *keys, nil
+	}
+
+	storageKeys, err := k.Storage.GetKeys()
+	if err != nil {
+		return storageKeys, err
+	}
+
+	if k.now().Before(storageKeys.NextRotation) {
+		k.keys.Store(&storageKeys)
+	}
+	return storageKeys, nil
+}
+
+func (s *Server) startGarbageCollection(ctx context.Context, frequency time.Duration, now func() time.Time) {
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(frequency):
+				if r, err := s.storage.GarbageCollect(now()); err != nil {
+					s.logger.Errorf("garbage collection failed: %v", err)
+				} else if !r.IsEmpty() {
+					s.logger.Infof("garbage collection run, delete auth requests=%d, auth codes=%d, device requests=%d, device tokens=%d",
+						r.AuthRequests, r.AuthCodes, r.DeviceRequests, r.DeviceTokens)
+				}
+			}
+		}
+	}()
+}
+
+// ConnectorConfig is a configuration that can open a connector.
+type ConnectorConfig interface {
+	Open(id string, logger log.Logger) (connector.Connector, error)
+}
+
+// ConnectorsConfig variable provides an easy way to return a config struct
+// depending on the connector type.
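+//
+// For example (the "example" type and example.Config are hypothetical), an
+// out-of-tree connector could be registered before the server is constructed:
+//
+//	ConnectorsConfig["example"] = func() ConnectorConfig { return new(example.Config) }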
+var ConnectorsConfig = map[string]func() ConnectorConfig{ + "keystone": func() ConnectorConfig { return new(keystone.Config) }, + "mockCallback": func() ConnectorConfig { return new(mock.CallbackConfig) }, + "mockPassword": func() ConnectorConfig { return new(mock.PasswordConfig) }, + "ldap": func() ConnectorConfig { return new(ldap.Config) }, + "gitea": func() ConnectorConfig { return new(gitea.Config) }, + "github": func() ConnectorConfig { return new(github.Config) }, + "gitlab": func() ConnectorConfig { return new(gitlab.Config) }, + "google": func() ConnectorConfig { return new(google.Config) }, + "oidc": func() ConnectorConfig { return new(oidc.Config) }, + "oauth": func() ConnectorConfig { return new(oauth.Config) }, + "saml": func() ConnectorConfig { return new(saml.Config) }, + "authproxy": func() ConnectorConfig { return new(authproxy.Config) }, + "linkedin": func() ConnectorConfig { return new(linkedin.Config) }, + "microsoft": func() ConnectorConfig { return new(microsoft.Config) }, + "bitbucket-cloud": func() ConnectorConfig { return new(bitbucketcloud.Config) }, + "openshift": func() ConnectorConfig { return new(openshift.Config) }, + "atlassian-crowd": func() ConnectorConfig { return new(atlassiancrowd.Config) }, + // Keep around for backwards compatibility. + "samlExperimental": func() ConnectorConfig { return new(saml.Config) }, +} + +// openConnector will parse the connector config and open the connector. +func openConnector(logger log.Logger, conn storage.Connector) (connector.Connector, error) { + var c connector.Connector + + f, ok := ConnectorsConfig[conn.Type] + if !ok { + return c, fmt.Errorf("unknown connector type %q", conn.Type) + } + + connConfig := f() + if len(conn.Config) != 0 { + data := []byte(string(conn.Config)) + if err := json.Unmarshal(data, connConfig); err != nil { + return c, fmt.Errorf("parse connector config: %v", err) + } + } + + c, err := connConfig.Open(conn.ID, logger) + if err != nil { + return c, fmt.Errorf("failed to create connector %s: %v", conn.ID, err) + } + + return c, nil +} + +// OpenConnector updates server connector map with specified connector object. +func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) { + var c connector.Connector + + if conn.Type == LocalConnector { + c = newPasswordDB(s.storage) + } else { + var err error + c, err = openConnector(s.logger, conn) + if err != nil { + return Connector{}, fmt.Errorf("failed to open connector: %v", err) + } + } + + connector := Connector{ + ResourceVersion: conn.ResourceVersion, + Connector: c, + } + s.mu.Lock() + s.connectors[conn.ID] = connector + s.mu.Unlock() + + return connector, nil +} + +// getConnector retrieves the connector object with the given id from the storage +// and updates the connector list for server if necessary. +func (s *Server) getConnector(id string) (Connector, error) { + storageConnector, err := s.storage.GetConnector(id) + if err != nil { + return Connector{}, fmt.Errorf("failed to get connector object from storage: %v", err) + } + + var conn Connector + var ok bool + s.mu.Lock() + conn, ok = s.connectors[id] + s.mu.Unlock() + + if !ok || storageConnector.ResourceVersion != conn.ResourceVersion { + // Connector object does not exist in server connectors map or + // has been updated in the storage. Need to get latest. 
+ conn, err := s.OpenConnector(storageConnector) + if err != nil { + return Connector{}, fmt.Errorf("failed to open connector: %v", err) + } + return conn, nil + } + + return conn, nil +} diff --git a/vendor/github.com/dexidp/dex/server/templates.go b/vendor/github.com/dexidp/dex/server/templates.go new file mode 100644 index 00000000..b77663e1 --- /dev/null +++ b/vendor/github.com/dexidp/dex/server/templates.go @@ -0,0 +1,370 @@ +package server + +import ( + "fmt" + "html/template" + "io" + "io/fs" + "net/http" + "net/url" + "path" + "sort" + "strings" + + "github.com/Masterminds/sprig/v3" +) + +const ( + tmplApproval = "approval.html" + tmplLogin = "login.html" + tmplPassword = "password.html" + tmplOOB = "oob.html" + tmplError = "error.html" + tmplDevice = "device.html" + tmplDeviceSuccess = "device_success.html" +) + +var requiredTmpls = []string{ + tmplApproval, + tmplLogin, + tmplPassword, + tmplOOB, + tmplError, + tmplDevice, + tmplDeviceSuccess, +} + +type templates struct { + loginTmpl *template.Template + approvalTmpl *template.Template + passwordTmpl *template.Template + oobTmpl *template.Template + errorTmpl *template.Template + deviceTmpl *template.Template + deviceSuccessTmpl *template.Template +} + +type webConfig struct { + webFS fs.FS + logoURL string + issuer string + theme string + issuerURL string + extra map[string]string +} + +func getFuncMap(c webConfig) (template.FuncMap, error) { + funcs := sprig.FuncMap() + + issuerURL, err := url.Parse(c.issuerURL) + if err != nil { + return nil, fmt.Errorf("error parsing issuerURL: %v", err) + } + + additionalFuncs := map[string]interface{}{ + "extra": func(k string) string { return c.extra[k] }, + "issuer": func() string { return c.issuer }, + "logo": func() string { return c.logoURL }, + "url": func(reqPath, assetPath string) string { + return relativeURL(issuerURL.Path, reqPath, assetPath) + }, + } + + for k, v := range additionalFuncs { + funcs[k] = v + } + + return funcs, nil +} + +// loadWebConfig returns static assets, theme assets, and templates used by the frontend by +// reading the dir specified in the webConfig. If directory is not specified it will +// use the file system specified by webFS. 
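+// (In newServer this defaults to dex's embedded web.FS() when neither Dir nor
+// WebFS is provided.)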
+//
+// The directory layout is expected to be:
+//
+// ( web directory )
+// |- static
+// |- themes
+// |   |- (theme name)
+// |- templates
+func loadWebConfig(c webConfig) (http.Handler, http.Handler, http.HandlerFunc, *templates, error) {
+	// Fall back to the default theme if a legacy theme name is provided.
+	if c.theme == "coreos" || c.theme == "tectonic" {
+		c.theme = ""
+	}
+	if c.theme == "" {
+		c.theme = "light"
+	}
+	if c.issuer == "" {
+		c.issuer = "dex"
+	}
+	if c.logoURL == "" {
+		c.logoURL = "theme/logo.png"
+	}
+
+	staticFiles, err := fs.Sub(c.webFS, "static")
+	if err != nil {
+		return nil, nil, nil, nil, fmt.Errorf("read static dir: %v", err)
+	}
+	themeFiles, err := fs.Sub(c.webFS, path.Join("themes", c.theme))
+	if err != nil {
+		return nil, nil, nil, nil, fmt.Errorf("read themes dir: %v", err)
+	}
+	robotsContent, err := fs.ReadFile(c.webFS, "robots.txt")
+	if err != nil {
+		return nil, nil, nil, nil, fmt.Errorf("read robots.txt: %v", err)
+	}
+
+	static := http.FileServer(http.FS(staticFiles))
+	theme := http.FileServer(http.FS(themeFiles))
+	robots := func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, string(robotsContent)) }
+
+	templates, err := loadTemplates(c, "templates")
+
+	return static, theme, robots, templates, err
+}
+
+// loadTemplates parses the expected templates from the provided directory.
+func loadTemplates(c webConfig, templatesDir string) (*templates, error) {
+	files, err := fs.ReadDir(c.webFS, templatesDir)
+	if err != nil {
+		return nil, fmt.Errorf("read dir: %v", err)
+	}
+
+	filenames := []string{}
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+		filenames = append(filenames, path.Join(templatesDir, file.Name()))
+	}
+	if len(filenames) == 0 {
+		return nil, fmt.Errorf("no files in template dir %q", templatesDir)
+	}
+
+	funcs, err := getFuncMap(c)
+	if err != nil {
+		return nil, err
+	}
+
+	tmpls, err := template.New("").Funcs(funcs).ParseFS(c.webFS, filenames...)
+	if err != nil {
+		return nil, fmt.Errorf("parse files: %v", err)
+	}
+	missingTmpls := []string{}
+	for _, tmplName := range requiredTmpls {
+		if tmpls.Lookup(tmplName) == nil {
+			missingTmpls = append(missingTmpls, tmplName)
+		}
+	}
+	if len(missingTmpls) > 0 {
+		return nil, fmt.Errorf("missing template(s): %s", missingTmpls)
+	}
+	return &templates{
+		loginTmpl:         tmpls.Lookup(tmplLogin),
+		approvalTmpl:      tmpls.Lookup(tmplApproval),
+		passwordTmpl:      tmpls.Lookup(tmplPassword),
+		oobTmpl:           tmpls.Lookup(tmplOOB),
+		errorTmpl:         tmpls.Lookup(tmplError),
+		deviceTmpl:        tmpls.Lookup(tmplDevice),
+		deviceSuccessTmpl: tmpls.Lookup(tmplDeviceSuccess),
+	}, nil
+}
+
+// relativeURL returns the URL of the asset relative to the URL of the request path.
+// The serverPath is consulted to trim any prefix in case the server is not listening
+// on the root path.
+//
+// Algorithm:
+// 1. Remove common prefix of serverPath and reqPath
+// 2. Remove common prefix of assetPath and reqPath
+// 3. For each part of reqPath remaining (minus one), go up one level (..)
+// 4. For each part of assetPath remaining, append it to result
+//
+// e.g.
+// server listens at localhost/dex so serverPath is dex
+// reqPath is /dex/auth
+// assetPath is static/main.css
+// relativeURL("/dex", "/dex/auth", "static/main.css") = "../static/main.css"
+func relativeURL(serverPath, reqPath, assetPath string) string {
+	if u, err := url.ParseRequestURI(assetPath); err == nil && u.Scheme != "" {
+		// assetPath points to an external URL, no changes needed
+		return assetPath
+	}
+
+	splitPath := func(p string) []string {
+		res := []string{}
+		parts := strings.Split(path.Clean(p), "/")
+		for _, part := range parts {
+			if part != "" {
+				res = append(res, part)
+			}
+		}
+		return res
+	}
+
+	stripCommonParts := func(s1, s2 []string) ([]string, []string) {
+		min := len(s1)
+		if len(s2) < min {
+			min = len(s2)
+		}
+
+		splitIndex := min
+		for i := 0; i < min; i++ {
+			if s1[i] != s2[i] {
+				splitIndex = i
+				break
+			}
+		}
+		return s1[splitIndex:], s2[splitIndex:]
+	}
+
+	server, req, asset := splitPath(serverPath), splitPath(reqPath), splitPath(assetPath)
+
+	// Remove common prefix of request path with server path
+	_, req = stripCommonParts(server, req)
+
+	// Remove common prefix of request path with asset path
+	asset, req = stripCommonParts(asset, req)
+
+	// For each part of the request remaining (minus one) -> go up one level (..)
+	// For each part of the asset remaining -> append it
+	var relativeURL string
+	for i := 0; i < len(req)-1; i++ {
+		relativeURL = path.Join("..", relativeURL)
+	}
+	relativeURL = path.Join(relativeURL, path.Join(asset...))
+
+	return relativeURL
+}
+
+var scopeDescriptions = map[string]string{
+	"offline_access": "Have offline access",
+	"profile":        "View basic profile information",
+	"email":          "View your email address",
+	// 'groups' is not a standard OIDC scope, and Dex returns groups only if the upstream provider does too.
+	// This warning is added for convenience to show that the user may expose some sensitive data to the application.
+ "groups": "View your groups", +} + +type connectorInfo struct { + ID string + Name string + URL template.URL + Type string +} + +type byName []connectorInfo + +func (n byName) Len() int { return len(n) } +func (n byName) Less(i, j int) bool { return n[i].Name < n[j].Name } +func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +func (t *templates) device(r *http.Request, w http.ResponseWriter, postURL string, userCode string, lastWasInvalid bool) error { + if lastWasInvalid { + w.WriteHeader(http.StatusBadRequest) + } + data := struct { + PostURL string + UserCode string + Invalid bool + ReqPath string + }{postURL, userCode, lastWasInvalid, r.URL.Path} + return renderTemplate(w, t.deviceTmpl, data) +} + +func (t *templates) deviceSuccess(r *http.Request, w http.ResponseWriter, clientName string) error { + data := struct { + ClientName string + ReqPath string + }{clientName, r.URL.Path} + return renderTemplate(w, t.deviceSuccessTmpl, data) +} + +func (t *templates) login(r *http.Request, w http.ResponseWriter, connectors []connectorInfo) error { + sort.Sort(byName(connectors)) + data := struct { + Connectors []connectorInfo + ReqPath string + }{connectors, r.URL.Path} + return renderTemplate(w, t.loginTmpl, data) +} + +func (t *templates) password(r *http.Request, w http.ResponseWriter, postURL, lastUsername, usernamePrompt string, lastWasInvalid bool, backLink string) error { + if lastWasInvalid { + w.WriteHeader(http.StatusUnauthorized) + } + data := struct { + PostURL string + BackLink string + Username string + UsernamePrompt string + Invalid bool + ReqPath string + }{postURL, backLink, lastUsername, usernamePrompt, lastWasInvalid, r.URL.Path} + return renderTemplate(w, t.passwordTmpl, data) +} + +func (t *templates) approval(r *http.Request, w http.ResponseWriter, authReqID, username, clientName string, scopes []string) error { + accesses := []string{} + for _, scope := range scopes { + access, ok := scopeDescriptions[scope] + if ok { + accesses = append(accesses, access) + } + } + sort.Strings(accesses) + data := struct { + User string + Client string + AuthReqID string + Scopes []string + ReqPath string + }{username, clientName, authReqID, accesses, r.URL.Path} + return renderTemplate(w, t.approvalTmpl, data) +} + +func (t *templates) oob(r *http.Request, w http.ResponseWriter, code string) error { + data := struct { + Code string + ReqPath string + }{code, r.URL.Path} + return renderTemplate(w, t.oobTmpl, data) +} + +func (t *templates) err(r *http.Request, w http.ResponseWriter, errCode int, errMsg string) error { + w.WriteHeader(errCode) + data := struct { + ErrType string + ErrMsg string + ReqPath string + }{http.StatusText(errCode), errMsg, r.URL.Path} + if err := t.errorTmpl.Execute(w, data); err != nil { + return fmt.Errorf("rendering template %s failed: %s", t.errorTmpl.Name(), err) + } + return nil +} + +// small io.Writer utility to determine if executing the template wrote to the underlying response writer. +type writeRecorder struct { + wrote bool + w io.Writer +} + +func (w *writeRecorder) Write(p []byte) (n int, err error) { + w.wrote = true + return w.w.Write(p) +} + +func renderTemplate(w http.ResponseWriter, tmpl *template.Template, data interface{}) error { + wr := &writeRecorder{w: w} + if err := tmpl.Execute(wr, data); err != nil { + if !wr.wrote { + // TODO(ericchiang): replace with better internal server error. 
+ http.Error(w, "Internal server error", http.StatusInternalServerError) + } + return fmt.Errorf("rendering template %s failed: %s", tmpl.Name(), err) + } + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/BUILD b/vendor/github.com/dexidp/dex/storage/BUILD new file mode 100644 index 00000000..a42d2381 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/BUILD @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "storage", + srcs = [ + "doc.go", + "health.go", + "static.go", + "storage.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage", + importpath = "github.com/dexidp/dex/storage", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/doc.go b/vendor/github.com/dexidp/dex/storage/doc.go new file mode 100644 index 00000000..0a2d76b4 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/doc.go @@ -0,0 +1,2 @@ +// Package storage defines the storage interface and types used by the server. +package storage diff --git a/vendor/github.com/dexidp/dex/storage/ent/BUILD b/vendor/github.com/dexidp/dex/storage/ent/BUILD new file mode 100644 index 00000000..e16f4a62 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ent", + srcs = [ + "generate.go", + "mysql.go", + "postgres.go", + "sqlite.go", + "types.go", + "utils.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent", + importpath = "github.com/dexidp/dex/storage/ent", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/dexidp/dex/storage/ent/client", + "//vendor/github.com/dexidp/dex/storage/ent/db", + "//vendor/github.com/go-sql-driver/mysql", + "//vendor/github.com/lib/pq", + "//vendor/github.com/mattn/go-sqlite3", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/BUILD b/vendor/github.com/dexidp/dex/storage/ent/client/BUILD new file mode 100644 index 00000000..db6d6440 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "client", + srcs = [ + "authcode.go", + "authrequest.go", + "client.go", + "connector.go", + "devicerequest.go", + "devicetoken.go", + "keys.go", + "main.go", + "offlinesession.go", + "password.go", + "refreshtoken.go", + "types.go", + "utils.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/client", + importpath = "github.com/dexidp/dex/storage/ent/client", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/dexidp/dex/storage/ent/db", + "//vendor/github.com/dexidp/dex/storage/ent/db/authcode", + "//vendor/github.com/dexidp/dex/storage/ent/db/authrequest", + "//vendor/github.com/dexidp/dex/storage/ent/db/devicerequest", + "//vendor/github.com/dexidp/dex/storage/ent/db/devicetoken", + "//vendor/github.com/dexidp/dex/storage/ent/db/migrate", + "//vendor/github.com/dexidp/dex/storage/ent/db/password", + "//vendor/github.com/pkg/errors", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/authcode.go 
b/vendor/github.com/dexidp/dex/storage/ent/client/authcode.go new file mode 100644 index 00000000..b6b263bf --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/authcode.go @@ -0,0 +1,52 @@ +package client + +import ( + "context" + + "github.com/dexidp/dex/storage" +) + +// CreateAuthCode saves provided auth code into the database. +func (d *Database) CreateAuthCode(code storage.AuthCode) error { + _, err := d.client.AuthCode.Create(). + SetID(code.ID). + SetClientID(code.ClientID). + SetScopes(code.Scopes). + SetRedirectURI(code.RedirectURI). + SetNonce(code.Nonce). + SetClaimsUserID(code.Claims.UserID). + SetClaimsEmail(code.Claims.Email). + SetClaimsEmailVerified(code.Claims.EmailVerified). + SetClaimsUsername(code.Claims.Username). + SetClaimsPreferredUsername(code.Claims.PreferredUsername). + SetClaimsGroups(code.Claims.Groups). + SetCodeChallenge(code.PKCE.CodeChallenge). + SetCodeChallengeMethod(code.PKCE.CodeChallengeMethod). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(code.Expiry.UTC()). + SetConnectorID(code.ConnectorID). + SetConnectorData(code.ConnectorData). + Save(context.TODO()) + if err != nil { + return convertDBError("create auth code: %w", err) + } + return nil +} + +// GetAuthCode extracts an auth code from the database by id. +func (d *Database) GetAuthCode(id string) (storage.AuthCode, error) { + authCode, err := d.client.AuthCode.Get(context.TODO(), id) + if err != nil { + return storage.AuthCode{}, convertDBError("get auth code: %w", err) + } + return toStorageAuthCode(authCode), nil +} + +// DeleteAuthCode deletes an auth code from the database by id. +func (d *Database) DeleteAuthCode(id string) error { + err := d.client.AuthCode.DeleteOneID(id).Exec(context.TODO()) + if err != nil { + return convertDBError("delete auth code: %w", err) + } + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/authrequest.go b/vendor/github.com/dexidp/dex/storage/ent/client/authrequest.go new file mode 100644 index 00000000..d68fd438 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/authrequest.go @@ -0,0 +1,109 @@ +package client + +import ( + "context" + "fmt" + + "github.com/dexidp/dex/storage" +) + +// CreateAuthRequest saves provided auth request into the database. +func (d *Database) CreateAuthRequest(authRequest storage.AuthRequest) error { + _, err := d.client.AuthRequest.Create(). + SetID(authRequest.ID). + SetClientID(authRequest.ClientID). + SetScopes(authRequest.Scopes). + SetResponseTypes(authRequest.ResponseTypes). + SetRedirectURI(authRequest.RedirectURI). + SetState(authRequest.State). + SetNonce(authRequest.Nonce). + SetForceApprovalPrompt(authRequest.ForceApprovalPrompt). + SetLoggedIn(authRequest.LoggedIn). + SetClaimsUserID(authRequest.Claims.UserID). + SetClaimsEmail(authRequest.Claims.Email). + SetClaimsEmailVerified(authRequest.Claims.EmailVerified). + SetClaimsUsername(authRequest.Claims.Username). + SetClaimsPreferredUsername(authRequest.Claims.PreferredUsername). + SetClaimsGroups(authRequest.Claims.Groups). + SetCodeChallenge(authRequest.PKCE.CodeChallenge). + SetCodeChallengeMethod(authRequest.PKCE.CodeChallengeMethod). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(authRequest.Expiry.UTC()). + SetConnectorID(authRequest.ConnectorID). + SetConnectorData(authRequest.ConnectorData). + SetHmacKey(authRequest.HMACKey). 
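+		// Save executes the insert; any driver error is normalized by
+		// convertDBError below.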
+ Save(context.TODO()) + if err != nil { + return convertDBError("create auth request: %w", err) + } + return nil +} + +// GetAuthRequest extracts an auth request from the database by id. +func (d *Database) GetAuthRequest(id string) (storage.AuthRequest, error) { + authRequest, err := d.client.AuthRequest.Get(context.TODO(), id) + if err != nil { + return storage.AuthRequest{}, convertDBError("get auth request: %w", err) + } + return toStorageAuthRequest(authRequest), nil +} + +// DeleteAuthRequest deletes an auth request from the database by id. +func (d *Database) DeleteAuthRequest(id string) error { + err := d.client.AuthRequest.DeleteOneID(id).Exec(context.TODO()) + if err != nil { + return convertDBError("delete auth request: %w", err) + } + return nil +} + +// UpdateAuthRequest changes an auth request by id using an updater function and saves it to the database. +func (d *Database) UpdateAuthRequest(id string, updater func(old storage.AuthRequest) (storage.AuthRequest, error)) error { + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return fmt.Errorf("update auth request tx: %w", err) + } + + authRequest, err := tx.AuthRequest.Get(context.TODO(), id) + if err != nil { + return rollback(tx, "update auth request database: %w", err) + } + + newAuthRequest, err := updater(toStorageAuthRequest(authRequest)) + if err != nil { + return rollback(tx, "update auth request updating: %w", err) + } + + _, err = tx.AuthRequest.UpdateOneID(newAuthRequest.ID). + SetClientID(newAuthRequest.ClientID). + SetScopes(newAuthRequest.Scopes). + SetResponseTypes(newAuthRequest.ResponseTypes). + SetRedirectURI(newAuthRequest.RedirectURI). + SetState(newAuthRequest.State). + SetNonce(newAuthRequest.Nonce). + SetForceApprovalPrompt(newAuthRequest.ForceApprovalPrompt). + SetLoggedIn(newAuthRequest.LoggedIn). + SetClaimsUserID(newAuthRequest.Claims.UserID). + SetClaimsEmail(newAuthRequest.Claims.Email). + SetClaimsEmailVerified(newAuthRequest.Claims.EmailVerified). + SetClaimsUsername(newAuthRequest.Claims.Username). + SetClaimsPreferredUsername(newAuthRequest.Claims.PreferredUsername). + SetClaimsGroups(newAuthRequest.Claims.Groups). + SetCodeChallenge(newAuthRequest.PKCE.CodeChallenge). + SetCodeChallengeMethod(newAuthRequest.PKCE.CodeChallengeMethod). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(newAuthRequest.Expiry.UTC()). + SetConnectorID(newAuthRequest.ConnectorID). + SetConnectorData(newAuthRequest.ConnectorData). + SetHmacKey(newAuthRequest.HMACKey). + Save(context.TODO()) + if err != nil { + return rollback(tx, "update auth request uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update auth request commit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/client.go b/vendor/github.com/dexidp/dex/storage/ent/client/client.go new file mode 100644 index 00000000..07434bd6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/client.go @@ -0,0 +1,92 @@ +package client + +import ( + "context" + + "github.com/dexidp/dex/storage" +) + +// CreateClient saves provided oauth2 client settings into the database. +func (d *Database) CreateClient(client storage.Client) error { + _, err := d.client.OAuth2Client.Create(). + SetID(client.ID). + SetName(client.Name). + SetSecret(client.Secret). + SetPublic(client.Public). + SetLogoURL(client.LogoURL). + SetRedirectUris(client.RedirectURIs). + SetTrustedPeers(client.TrustedPeers). 
+		Save(context.TODO())
+	if err != nil {
+		return convertDBError("create oauth2 client: %w", err)
+	}
+	return nil
+}
+
+// ListClients extracts an array of oauth2 clients from the database.
+func (d *Database) ListClients() ([]storage.Client, error) {
+	clients, err := d.client.OAuth2Client.Query().All(context.TODO())
+	if err != nil {
+		return nil, convertDBError("list clients: %w", err)
+	}
+
+	storageClients := make([]storage.Client, 0, len(clients))
+	for _, c := range clients {
+		storageClients = append(storageClients, toStorageClient(c))
+	}
+	return storageClients, nil
+}
+
+// GetClient extracts an oauth2 client from the database by id.
+func (d *Database) GetClient(id string) (storage.Client, error) {
+	client, err := d.client.OAuth2Client.Get(context.TODO(), id)
+	if err != nil {
+		return storage.Client{}, convertDBError("get client: %w", err)
+	}
+	return toStorageClient(client), nil
+}
+
+// DeleteClient deletes an oauth2 client from the database by id.
+func (d *Database) DeleteClient(id string) error {
+	err := d.client.OAuth2Client.DeleteOneID(id).Exec(context.TODO())
+	if err != nil {
+		return convertDBError("delete client: %w", err)
+	}
+	return nil
+}
+
+// UpdateClient changes an oauth2 client by id using an updater function and saves it to the database.
+func (d *Database) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error {
+	tx, err := d.BeginTx(context.TODO())
+	if err != nil {
+		return convertDBError("update client tx: %w", err)
+	}
+
+	client, err := tx.OAuth2Client.Get(context.TODO(), id)
+	if err != nil {
+		return rollback(tx, "update client database: %w", err)
+	}
+
+	newClient, err := updater(toStorageClient(client))
+	if err != nil {
+		return rollback(tx, "update client updating: %w", err)
+	}
+
+	_, err = tx.OAuth2Client.UpdateOneID(newClient.ID).
+		SetName(newClient.Name).
+		SetSecret(newClient.Secret).
+		SetPublic(newClient.Public).
+		SetLogoURL(newClient.LogoURL).
+		SetRedirectUris(newClient.RedirectURIs).
+		SetTrustedPeers(newClient.TrustedPeers).
+		Save(context.TODO())
+	if err != nil {
+		return rollback(tx, "update client uploading: %w", err)
+	}
+
+	if err = tx.Commit(); err != nil {
+		return rollback(tx, "update client commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/connector.go b/vendor/github.com/dexidp/dex/storage/ent/client/connector.go
new file mode 100644
index 00000000..bfec4418
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/client/connector.go
@@ -0,0 +1,88 @@
+package client
+
+import (
+	"context"
+
+	"github.com/dexidp/dex/storage"
+)
+
+// CreateConnector saves a connector into the database.
+func (d *Database) CreateConnector(connector storage.Connector) error {
+	_, err := d.client.Connector.Create().
+		SetID(connector.ID).
+		SetName(connector.Name).
+		SetType(connector.Type).
+		SetResourceVersion(connector.ResourceVersion).
+		SetConfig(connector.Config).
+		Save(context.TODO())
+	if err != nil {
+		return convertDBError("create connector: %w", err)
+	}
+	return nil
+}
+
+// ListConnectors extracts an array of connectors from the database.
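+// Each ent record is converted to the storage representation via toStorageConnector.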
+func (d *Database) ListConnectors() ([]storage.Connector, error) { + connectors, err := d.client.Connector.Query().All(context.TODO()) + if err != nil { + return nil, convertDBError("list connectors: %w", err) + } + + storageConnectors := make([]storage.Connector, 0, len(connectors)) + for _, c := range connectors { + storageConnectors = append(storageConnectors, toStorageConnector(c)) + } + return storageConnectors, nil +} + +// GetConnector extracts a connector from the database by id. +func (d *Database) GetConnector(id string) (storage.Connector, error) { + connector, err := d.client.Connector.Get(context.TODO(), id) + if err != nil { + return storage.Connector{}, convertDBError("get connector: %w", err) + } + return toStorageConnector(connector), nil +} + +// DeleteConnector deletes a connector from the database by id. +func (d *Database) DeleteConnector(id string) error { + err := d.client.Connector.DeleteOneID(id).Exec(context.TODO()) + if err != nil { + return convertDBError("delete connector: %w", err) + } + return nil +} + +// UpdateConnector changes a connector by id using an updater function and saves it to the database. +func (d *Database) UpdateConnector(id string, updater func(old storage.Connector) (storage.Connector, error)) error { + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return convertDBError("update connector tx: %w", err) + } + + connector, err := tx.Connector.Get(context.TODO(), id) + if err != nil { + return rollback(tx, "update connector database: %w", err) + } + + newConnector, err := updater(toStorageConnector(connector)) + if err != nil { + return rollback(tx, "update connector updating: %w", err) + } + + _, err = tx.Connector.UpdateOneID(newConnector.ID). + SetName(newConnector.Name). + SetType(newConnector.Type). + SetResourceVersion(newConnector.ResourceVersion). + SetConfig(newConnector.Config). + Save(context.TODO()) + if err != nil { + return rollback(tx, "update connector uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update connector commit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/devicerequest.go b/vendor/github.com/dexidp/dex/storage/ent/client/devicerequest.go new file mode 100644 index 00000000..6e9c2500 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/devicerequest.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/devicerequest" +) + +// CreateDeviceRequest saves provided device request into the database. +func (d *Database) CreateDeviceRequest(request storage.DeviceRequest) error { + _, err := d.client.DeviceRequest.Create(). + SetClientID(request.ClientID). + SetClientSecret(request.ClientSecret). + SetScopes(request.Scopes). + SetUserCode(request.UserCode). + SetDeviceCode(request.DeviceCode). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(request.Expiry.UTC()). + Save(context.TODO()) + if err != nil { + return convertDBError("create device request: %w", err) + } + return nil +} + +// GetDeviceRequest extracts a device request from the database by user code. +func (d *Database) GetDeviceRequest(userCode string) (storage.DeviceRequest, error) { + deviceRequest, err := d.client.DeviceRequest.Query(). + Where(devicerequest.UserCode(userCode)). 
+ Only(context.TODO()) + if err != nil { + return storage.DeviceRequest{}, convertDBError("get device request: %w", err) + } + return toStorageDeviceRequest(deviceRequest), nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/devicetoken.go b/vendor/github.com/dexidp/dex/storage/ent/client/devicetoken.go new file mode 100644 index 00000000..99cf077d --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/devicetoken.go @@ -0,0 +1,80 @@ +package client + +import ( + "context" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/devicetoken" +) + +// CreateDeviceToken saves provided token into the database. +func (d *Database) CreateDeviceToken(token storage.DeviceToken) error { + _, err := d.client.DeviceToken.Create(). + SetDeviceCode(token.DeviceCode). + SetToken([]byte(token.Token)). + SetPollInterval(token.PollIntervalSeconds). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(token.Expiry.UTC()). + SetLastRequest(token.LastRequestTime.UTC()). + SetStatus(token.Status). + SetCodeChallenge(token.PKCE.CodeChallenge). + SetCodeChallengeMethod(token.PKCE.CodeChallengeMethod). + Save(context.TODO()) + if err != nil { + return convertDBError("create device token: %w", err) + } + return nil +} + +// GetDeviceToken extracts a token from the database by device code. +func (d *Database) GetDeviceToken(deviceCode string) (storage.DeviceToken, error) { + deviceToken, err := d.client.DeviceToken.Query(). + Where(devicetoken.DeviceCode(deviceCode)). + Only(context.TODO()) + if err != nil { + return storage.DeviceToken{}, convertDBError("get device token: %w", err) + } + return toStorageDeviceToken(deviceToken), nil +} + +// UpdateDeviceToken changes a token by device code using an updater function and saves it to the database. +func (d *Database) UpdateDeviceToken(deviceCode string, updater func(old storage.DeviceToken) (storage.DeviceToken, error)) error { + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return convertDBError("update device token tx: %w", err) + } + + token, err := tx.DeviceToken.Query(). + Where(devicetoken.DeviceCode(deviceCode)). + Only(context.TODO()) + if err != nil { + return rollback(tx, "update device token database: %w", err) + } + + newToken, err := updater(toStorageDeviceToken(token)) + if err != nil { + return rollback(tx, "update device token updating: %w", err) + } + + _, err = tx.DeviceToken.Update(). + Where(devicetoken.DeviceCode(newToken.DeviceCode)). + SetDeviceCode(newToken.DeviceCode). + SetToken([]byte(newToken.Token)). + SetPollInterval(newToken.PollIntervalSeconds). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetExpiry(newToken.Expiry.UTC()). + SetLastRequest(newToken.LastRequestTime.UTC()). + SetStatus(newToken.Status). + SetCodeChallenge(newToken.PKCE.CodeChallenge). + SetCodeChallengeMethod(newToken.PKCE.CodeChallengeMethod). 
+ Save(context.TODO()) + if err != nil { + return rollback(tx, "update device token uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update device token commit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/keys.go b/vendor/github.com/dexidp/dex/storage/ent/client/keys.go new file mode 100644 index 00000000..f65d40fc --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/keys.go @@ -0,0 +1,81 @@ +package client + +import ( + "context" + "errors" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db" +) + +func getKeys(client *db.KeysClient) (storage.Keys, error) { + rawKeys, err := client.Get(context.TODO(), keysRowID) + if err != nil { + return storage.Keys{}, convertDBError("get keys: %w", err) + } + + return toStorageKeys(rawKeys), nil +} + +// GetKeys returns signing keys, public keys and verification keys from the database. +func (d *Database) GetKeys() (storage.Keys, error) { + return getKeys(d.client.Keys) +} + +// UpdateKeys rotates keys using updater function. +func (d *Database) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { + firstUpdate := false + + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return convertDBError("update keys tx: %w", err) + } + + storageKeys, err := getKeys(tx.Keys) + if err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return rollback(tx, "update keys get: %w", err) + } + firstUpdate = true + } + + newKeys, err := updater(storageKeys) + if err != nil { + return rollback(tx, "update keys updating: %w", err) + } + + // ent doesn't have an upsert support yet + // https://github.com/facebook/ent/issues/139 + if firstUpdate { + _, err = tx.Keys.Create(). + SetID(keysRowID). + SetNextRotation(newKeys.NextRotation). + SetSigningKey(*newKeys.SigningKey). + SetSigningKeyPub(*newKeys.SigningKeyPub). + SetVerificationKeys(newKeys.VerificationKeys). + Save(context.TODO()) + if err != nil { + return rollback(tx, "create keys: %w", err) + } + if err = tx.Commit(); err != nil { + return rollback(tx, "update keys commit: %w", err) + } + return nil + } + + err = tx.Keys.UpdateOneID(keysRowID). + SetNextRotation(newKeys.NextRotation.UTC()). + SetSigningKey(*newKeys.SigningKey). + SetSigningKeyPub(*newKeys.SigningKeyPub). + SetVerificationKeys(newKeys.VerificationKeys). + Exec(context.TODO()) + if err != nil { + return rollback(tx, "update keys uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update keys commit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/main.go b/vendor/github.com/dexidp/dex/storage/ent/client/main.go new file mode 100644 index 00000000..bc4c1600 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/main.go @@ -0,0 +1,110 @@ +package client + +import ( + "context" + "database/sql" + "hash" + "time" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db" + "github.com/dexidp/dex/storage/ent/db/authcode" + "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/devicerequest" + "github.com/dexidp/dex/storage/ent/db/devicetoken" + "github.com/dexidp/dex/storage/ent/db/migrate" +) + +var _ storage.Storage = (*Database)(nil) + +type Database struct { + client *db.Client + txOptions *sql.TxOptions + + hasher func() hash.Hash +} + +// NewDatabase returns new database client with set options. 
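+// Options are applied in order. A typical construction (sketch; assumes an
+// opened ent client and a hash constructor such as sha256.New):
+//
+//	db := NewDatabase(WithClient(entClient), WithHasher(sha256.New))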
+func NewDatabase(opts ...func(*Database)) *Database {
+	database := &Database{}
+	for _, f := range opts {
+		f(database)
+	}
+	return database
+}
+
+// WithClient sets client option of a Database object.
+func WithClient(c *db.Client) func(*Database) {
+	return func(s *Database) {
+		s.client = c
+	}
+}
+
+// WithHasher sets the hasher option of a Database object.
+func WithHasher(h func() hash.Hash) func(*Database) {
+	return func(s *Database) {
+		s.hasher = h
+	}
+}
+
+// WithTxIsolationLevel sets correct isolation level for database transactions.
+func WithTxIsolationLevel(level sql.IsolationLevel) func(*Database) {
+	return func(s *Database) {
+		s.txOptions = &sql.TxOptions{Isolation: level}
+	}
+}
+
+// Schema exposes migration schema to perform migrations.
+func (d *Database) Schema() *migrate.Schema {
+	return d.client.Schema
+}
+
+// Close calls the corresponding method of the ent database client.
+func (d *Database) Close() error {
+	return d.client.Close()
+}
+
+// BeginTx is a wrapper to begin transaction with defined options.
+func (d *Database) BeginTx(ctx context.Context) (*db.Tx, error) {
+	return d.client.BeginTx(ctx, d.txOptions)
+}
+
+// GarbageCollect removes expired entities from the database.
+func (d *Database) GarbageCollect(now time.Time) (storage.GCResult, error) {
+	result := storage.GCResult{}
+	utcNow := now.UTC()
+
+	q, err := d.client.AuthRequest.Delete().
+		Where(authrequest.ExpiryLT(utcNow)).
+		Exec(context.TODO())
+	if err != nil {
+		return result, convertDBError("gc auth request: %w", err)
+	}
+	result.AuthRequests = int64(q)
+
+	q, err = d.client.AuthCode.Delete().
+		Where(authcode.ExpiryLT(utcNow)).
+		Exec(context.TODO())
+	if err != nil {
+		return result, convertDBError("gc auth code: %w", err)
+	}
+	result.AuthCodes = int64(q)
+
+	q, err = d.client.DeviceRequest.Delete().
+		Where(devicerequest.ExpiryLT(utcNow)).
+		Exec(context.TODO())
+	if err != nil {
+		return result, convertDBError("gc device request: %w", err)
+	}
+	result.DeviceRequests = int64(q)
+
+	q, err = d.client.DeviceToken.Delete().
+		Where(devicetoken.ExpiryLT(utcNow)).
+		Exec(context.TODO())
+	if err != nil {
+		return result, convertDBError("gc device token: %w", err)
+	}
+	result.DeviceTokens = int64(q)
+
+	return result, err
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/offlinesession.go b/vendor/github.com/dexidp/dex/storage/ent/client/offlinesession.go
new file mode 100644
index 00000000..9f54ea1d
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/client/offlinesession.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/dexidp/dex/storage"
+)
+
+// CreateOfflineSessions saves provided offline session into the database.
+func (d *Database) CreateOfflineSessions(session storage.OfflineSessions) error {
+	encodedRefresh, err := json.Marshal(session.Refresh)
+	if err != nil {
+		return fmt.Errorf("encode refresh offline session: %w", err)
+	}
+
+	id := offlineSessionID(session.UserID, session.ConnID, d.hasher)
+	_, err = d.client.OfflineSession.Create().
+		SetID(id).
+		SetUserID(session.UserID).
+		SetConnID(session.ConnID).
+		SetConnectorData(session.ConnectorData).
+		SetRefresh(encodedRefresh).
+		Save(context.TODO())
+	if err != nil {
+		return convertDBError("create offline session: %w", err)
+	}
+	return nil
+}
+
+// GetOfflineSessions extracts an offline session from the database by user id and connector id.
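+// The lookup key is the hash of user id and connector id computed by offlineSessionID.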
+func (d *Database) GetOfflineSessions(userID, connID string) (storage.OfflineSessions, error) { + id := offlineSessionID(userID, connID, d.hasher) + + offlineSession, err := d.client.OfflineSession.Get(context.TODO(), id) + if err != nil { + return storage.OfflineSessions{}, convertDBError("get offline session: %w", err) + } + return toStorageOfflineSession(offlineSession), nil +} + +// DeleteOfflineSessions deletes an offline session from the database by user id and connector id. +func (d *Database) DeleteOfflineSessions(userID, connID string) error { + id := offlineSessionID(userID, connID, d.hasher) + + err := d.client.OfflineSession.DeleteOneID(id).Exec(context.TODO()) + if err != nil { + return convertDBError("delete offline session: %w", err) + } + return nil +} + +// UpdateOfflineSessions changes an offline session by user id and connector id using an updater function. +func (d *Database) UpdateOfflineSessions(userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { + id := offlineSessionID(userID, connID, d.hasher) + + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return convertDBError("update offline session tx: %w", err) + } + + offlineSession, err := tx.OfflineSession.Get(context.TODO(), id) + if err != nil { + return rollback(tx, "update offline session database: %w", err) + } + + newOfflineSession, err := updater(toStorageOfflineSession(offlineSession)) + if err != nil { + return rollback(tx, "update offline session updating: %w", err) + } + + encodedRefresh, err := json.Marshal(newOfflineSession.Refresh) + if err != nil { + return rollback(tx, "encode refresh offline session: %w", err) + } + + _, err = tx.OfflineSession.UpdateOneID(id). + SetUserID(newOfflineSession.UserID). + SetConnID(newOfflineSession.ConnID). + SetConnectorData(newOfflineSession.ConnectorData). + SetRefresh(encodedRefresh). + Save(context.TODO()) + if err != nil { + return rollback(tx, "update offline session uploading: %w", err) + } + + if err = tx.Commit(); err != nil { + return rollback(tx, "update offline session commit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/password.go b/vendor/github.com/dexidp/dex/storage/ent/client/password.go new file mode 100644 index 00000000..daaae30c --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/password.go @@ -0,0 +1,100 @@ +package client + +import ( + "context" + "strings" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/password" +) + +// CreatePassword saves provided password into the database. +func (d *Database) CreatePassword(password storage.Password) error { + _, err := d.client.Password.Create(). + SetEmail(password.Email). + SetHash(password.Hash). + SetUsername(password.Username). + SetUserID(password.UserID). + Save(context.TODO()) + if err != nil { + return convertDBError("create password: %w", err) + } + return nil +} + +// ListPasswords extracts an array of passwords from the database. +func (d *Database) ListPasswords() ([]storage.Password, error) { + passwords, err := d.client.Password.Query().All(context.TODO()) + if err != nil { + return nil, convertDBError("list passwords: %w", err) + } + + storagePasswords := make([]storage.Password, 0, len(passwords)) + for _, p := range passwords { + storagePasswords = append(storagePasswords, toStoragePassword(p)) + } + return storagePasswords, nil +} + +// GetPassword extracts a password from the database by email. 
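+// The email is lower-cased first, so lookups are case-insensitive.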
+func (d *Database) GetPassword(email string) (storage.Password, error) {
+	email = strings.ToLower(email)
+	passwordFromStorage, err := d.client.Password.Query().
+		Where(password.Email(email)).
+		Only(context.TODO())
+	if err != nil {
+		return storage.Password{}, convertDBError("get password: %w", err)
+	}
+	return toStoragePassword(passwordFromStorage), nil
+}
+
+// DeletePassword deletes a password from the database by email.
+func (d *Database) DeletePassword(email string) error {
+	email = strings.ToLower(email)
+	_, err := d.client.Password.Delete().
+		Where(password.Email(email)).
+		Exec(context.TODO())
+	if err != nil {
+		return convertDBError("delete password: %w", err)
+	}
+	return nil
+}
+
+// UpdatePassword changes a password by email using an updater function and saves it to the database.
+func (d *Database) UpdatePassword(email string, updater func(old storage.Password) (storage.Password, error)) error {
+	email = strings.ToLower(email)
+
+	tx, err := d.BeginTx(context.TODO())
+	if err != nil {
+		return convertDBError("update password tx: %w", err)
+	}
+
+	passwordToUpdate, err := tx.Password.Query().
+		Where(password.Email(email)).
+		Only(context.TODO())
+	if err != nil {
+		return rollback(tx, "update password database: %w", err)
+	}
+
+	newPassword, err := updater(toStoragePassword(passwordToUpdate))
+	if err != nil {
+		return rollback(tx, "update password updating: %w", err)
+	}
+
+	_, err = tx.Password.Update().
+		Where(password.Email(newPassword.Email)).
+		SetEmail(newPassword.Email).
+		SetHash(newPassword.Hash).
+		SetUsername(newPassword.Username).
+		SetUserID(newPassword.UserID).
+		Save(context.TODO())
+	if err != nil {
+		return rollback(tx, "update password uploading: %w", err)
+	}
+
+	if err = tx.Commit(); err != nil {
+		return rollback(tx, "update password commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/refreshtoken.go b/vendor/github.com/dexidp/dex/storage/ent/client/refreshtoken.go
new file mode 100644
index 00000000..eca048f4
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/client/refreshtoken.go
@@ -0,0 +1,111 @@
+package client
+
+import (
+	"context"
+
+	"github.com/dexidp/dex/storage"
+)
+
+// CreateRefresh saves provided refresh token into the database.
+func (d *Database) CreateRefresh(refresh storage.RefreshToken) error {
+	_, err := d.client.RefreshToken.Create().
+		SetID(refresh.ID).
+		SetClientID(refresh.ClientID).
+		SetScopes(refresh.Scopes).
+		SetNonce(refresh.Nonce).
+		SetClaimsUserID(refresh.Claims.UserID).
+		SetClaimsEmail(refresh.Claims.Email).
+		SetClaimsEmailVerified(refresh.Claims.EmailVerified).
+		SetClaimsUsername(refresh.Claims.Username).
+		SetClaimsPreferredUsername(refresh.Claims.PreferredUsername).
+		SetClaimsGroups(refresh.Claims.Groups).
+		SetConnectorID(refresh.ConnectorID).
+		SetConnectorData(refresh.ConnectorData).
+		SetToken(refresh.Token).
+		SetObsoleteToken(refresh.ObsoleteToken).
+		// Save utc time into database because ent doesn't support comparing dates with different timezones
+		SetLastUsed(refresh.LastUsed.UTC()).
+		SetCreatedAt(refresh.CreatedAt.UTC()).
+		Save(context.TODO())
+	if err != nil {
+		return convertDBError("create refresh token: %w", err)
+	}
+	return nil
+}
+
+// ListRefreshTokens extracts an array of refresh tokens from the database.
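+// Each row is converted with toStorageRefreshToken before being returned.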
+func (d *Database) ListRefreshTokens() ([]storage.RefreshToken, error) { + refreshTokens, err := d.client.RefreshToken.Query().All(context.TODO()) + if err != nil { + return nil, convertDBError("list refresh tokens: %w", err) + } + + storageRefreshTokens := make([]storage.RefreshToken, 0, len(refreshTokens)) + for _, r := range refreshTokens { + storageRefreshTokens = append(storageRefreshTokens, toStorageRefreshToken(r)) + } + return storageRefreshTokens, nil +} + +// GetRefresh extracts a refresh token from the database by id. +func (d *Database) GetRefresh(id string) (storage.RefreshToken, error) { + refreshToken, err := d.client.RefreshToken.Get(context.TODO(), id) + if err != nil { + return storage.RefreshToken{}, convertDBError("get refresh token: %w", err) + } + return toStorageRefreshToken(refreshToken), nil +} + +// DeleteRefresh deletes a refresh token from the database by id. +func (d *Database) DeleteRefresh(id string) error { + err := d.client.RefreshToken.DeleteOneID(id).Exec(context.TODO()) + if err != nil { + return convertDBError("delete refresh token: %w", err) + } + return nil +} + +// UpdateRefreshToken changes a refresh token by id using an updater function and saves it to the database. +func (d *Database) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { + tx, err := d.BeginTx(context.TODO()) + if err != nil { + return convertDBError("update refresh token tx: %w", err) + } + + token, err := tx.RefreshToken.Get(context.TODO(), id) + if err != nil { + return rollback(tx, "update refresh token database: %w", err) + } + + newtToken, err := updater(toStorageRefreshToken(token)) + if err != nil { + return rollback(tx, "update refresh token updating: %w", err) + } + + _, err = tx.RefreshToken.UpdateOneID(newtToken.ID). + SetClientID(newtToken.ClientID). + SetScopes(newtToken.Scopes). + SetNonce(newtToken.Nonce). + SetClaimsUserID(newtToken.Claims.UserID). + SetClaimsEmail(newtToken.Claims.Email). + SetClaimsEmailVerified(newtToken.Claims.EmailVerified). + SetClaimsUsername(newtToken.Claims.Username). + SetClaimsPreferredUsername(newtToken.Claims.PreferredUsername). + SetClaimsGroups(newtToken.Claims.Groups). + SetConnectorID(newtToken.ConnectorID). + SetConnectorData(newtToken.ConnectorData). + SetToken(newtToken.Token). + SetObsoleteToken(newtToken.ObsoleteToken). + // Save utc time into database because ent doesn't support comparing dates with different timezones + SetLastUsed(newtToken.LastUsed.UTC()). + SetCreatedAt(newtToken.CreatedAt.UTC()). 
+		Save(context.TODO())
+	if err != nil {
+		return rollback(tx, "update refresh token uploading: %w", err)
+	}
+
+	if err = tx.Commit(); err != nil {
+		return rollback(tx, "update refresh token commit: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/types.go b/vendor/github.com/dexidp/dex/storage/ent/client/types.go
new file mode 100644
index 00000000..397d4d30
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/client/types.go
@@ -0,0 +1,173 @@
+package client
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/ent/db"
+)
+
+const keysRowID = "keys"
+
+func toStorageKeys(keys *db.Keys) storage.Keys {
+	return storage.Keys{
+		SigningKey:       &keys.SigningKey,
+		SigningKeyPub:    &keys.SigningKeyPub,
+		VerificationKeys: keys.VerificationKeys,
+		NextRotation:     keys.NextRotation,
+	}
+}
+
+func toStorageAuthRequest(a *db.AuthRequest) storage.AuthRequest {
+	return storage.AuthRequest{
+		ID:                  a.ID,
+		ClientID:            a.ClientID,
+		ResponseTypes:       a.ResponseTypes,
+		Scopes:              a.Scopes,
+		RedirectURI:         a.RedirectURI,
+		Nonce:               a.Nonce,
+		State:               a.State,
+		ForceApprovalPrompt: a.ForceApprovalPrompt,
+		LoggedIn:            a.LoggedIn,
+		ConnectorID:         a.ConnectorID,
+		ConnectorData:       *a.ConnectorData,
+		Expiry:              a.Expiry,
+		Claims: storage.Claims{
+			UserID:            a.ClaimsUserID,
+			Username:          a.ClaimsUsername,
+			PreferredUsername: a.ClaimsPreferredUsername,
+			Email:             a.ClaimsEmail,
+			EmailVerified:     a.ClaimsEmailVerified,
+			Groups:            a.ClaimsGroups,
+		},
+		PKCE: storage.PKCE{
+			CodeChallenge:       a.CodeChallenge,
+			CodeChallengeMethod: a.CodeChallengeMethod,
+		},
+		HMACKey: a.HmacKey,
+	}
+}
+
+func toStorageAuthCode(a *db.AuthCode) storage.AuthCode {
+	return storage.AuthCode{
+		ID:            a.ID,
+		ClientID:      a.ClientID,
+		Scopes:        a.Scopes,
+		RedirectURI:   a.RedirectURI,
+		Nonce:         a.Nonce,
+		ConnectorID:   a.ConnectorID,
+		ConnectorData: *a.ConnectorData,
+		Expiry:        a.Expiry,
+		Claims: storage.Claims{
+			UserID:            a.ClaimsUserID,
+			Username:          a.ClaimsUsername,
+			PreferredUsername: a.ClaimsPreferredUsername,
+			Email:             a.ClaimsEmail,
+			EmailVerified:     a.ClaimsEmailVerified,
+			Groups:            a.ClaimsGroups,
+		},
+		PKCE: storage.PKCE{
+			CodeChallenge:       a.CodeChallenge,
+			CodeChallengeMethod: a.CodeChallengeMethod,
+		},
+	}
+}
+
+func toStorageClient(c *db.OAuth2Client) storage.Client {
+	return storage.Client{
+		ID:           c.ID,
+		Secret:       c.Secret,
+		RedirectURIs: c.RedirectUris,
+		TrustedPeers: c.TrustedPeers,
+		Public:       c.Public,
+		Name:         c.Name,
+		LogoURL:      c.LogoURL,
+	}
+}
+
+func toStorageConnector(c *db.Connector) storage.Connector {
+	return storage.Connector{
+		ID:     c.ID,
+		Type:   c.Type,
+		Name:   c.Name,
+		Config: c.Config,
+	}
+}
+
+func toStorageOfflineSession(o *db.OfflineSession) storage.OfflineSessions {
+	s := storage.OfflineSessions{
+		UserID:        o.UserID,
+		ConnID:        o.ConnID,
+		ConnectorData: *o.ConnectorData,
+	}
+
+	if o.Refresh != nil {
+		if err := json.Unmarshal(o.Refresh, &s.Refresh); err != nil {
+			// Correctness of json structure is guaranteed on upload
+			panic(err)
+		}
+	} else {
+		// Server code assumes this will be non-nil.
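+		// Fall back to an empty map rather than leaving Refresh nil.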
+ s.Refresh = make(map[string]*storage.RefreshTokenRef) + } + return s +} + +func toStorageRefreshToken(r *db.RefreshToken) storage.RefreshToken { + return storage.RefreshToken{ + ID: r.ID, + Token: r.Token, + ObsoleteToken: r.ObsoleteToken, + CreatedAt: r.CreatedAt, + LastUsed: r.LastUsed, + ClientID: r.ClientID, + ConnectorID: r.ConnectorID, + ConnectorData: *r.ConnectorData, + Scopes: r.Scopes, + Nonce: r.Nonce, + Claims: storage.Claims{ + UserID: r.ClaimsUserID, + Username: r.ClaimsUsername, + PreferredUsername: r.ClaimsPreferredUsername, + Email: r.ClaimsEmail, + EmailVerified: r.ClaimsEmailVerified, + Groups: r.ClaimsGroups, + }, + } +} + +func toStoragePassword(p *db.Password) storage.Password { + return storage.Password{ + Email: p.Email, + Hash: p.Hash, + Username: p.Username, + UserID: p.UserID, + } +} + +func toStorageDeviceRequest(r *db.DeviceRequest) storage.DeviceRequest { + return storage.DeviceRequest{ + UserCode: strings.ToUpper(r.UserCode), + DeviceCode: r.DeviceCode, + ClientID: r.ClientID, + ClientSecret: r.ClientSecret, + Scopes: r.Scopes, + Expiry: r.Expiry, + } +} + +func toStorageDeviceToken(t *db.DeviceToken) storage.DeviceToken { + return storage.DeviceToken{ + DeviceCode: t.DeviceCode, + Status: t.Status, + Token: string(*t.Token), + Expiry: t.Expiry, + LastRequestTime: t.LastRequest, + PollIntervalSeconds: t.PollInterval, + PKCE: storage.PKCE{ + CodeChallenge: t.CodeChallenge, + CodeChallengeMethod: t.CodeChallengeMethod, + }, + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/client/utils.go b/vendor/github.com/dexidp/dex/storage/ent/client/utils.go new file mode 100644 index 00000000..65c037ac --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/client/utils.go @@ -0,0 +1,44 @@ +package client + +import ( + "fmt" + "hash" + + "github.com/pkg/errors" + + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db" +) + +func rollback(tx *db.Tx, t string, err error) error { + rerr := tx.Rollback() + err = convertDBError(t, err) + + if rerr == nil { + return err + } + return errors.Wrapf(err, "rolling back transaction: %v", rerr) +} + +func convertDBError(t string, err error) error { + if db.IsNotFound(err) { + return storage.ErrNotFound + } + + if db.IsConstraintError(err) { + return storage.ErrAlreadyExists + } + + return fmt.Errorf(t, err) +} + +// compose hashed id from user and connection id to use it as primary key +// ent doesn't support multi-key primary yet +// https://github.com/facebook/ent/issues/400 +func offlineSessionID(userID string, connID string, hasher func() hash.Hash) string { + h := hasher() + + h.Write([]byte(userID)) + h.Write([]byte(connID)) + return fmt.Sprintf("%x", h.Sum(nil)) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/BUILD new file mode 100644 index 00000000..89c22d8b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/BUILD @@ -0,0 +1,88 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "db", + srcs = [ + "authcode.go", + "authcode_create.go", + "authcode_delete.go", + "authcode_query.go", + "authcode_update.go", + "authrequest.go", + "authrequest_create.go", + "authrequest_delete.go", + "authrequest_query.go", + "authrequest_update.go", + "client.go", + "connector.go", + "connector_create.go", + "connector_delete.go", + "connector_query.go", + "connector_update.go", + "devicerequest.go", + "devicerequest_create.go", + "devicerequest_delete.go", + "devicerequest_query.go", + 
"devicerequest_update.go", + "devicetoken.go", + "devicetoken_create.go", + "devicetoken_delete.go", + "devicetoken_query.go", + "devicetoken_update.go", + "ent.go", + "keys.go", + "keys_create.go", + "keys_delete.go", + "keys_query.go", + "keys_update.go", + "mutation.go", + "oauth2client.go", + "oauth2client_create.go", + "oauth2client_delete.go", + "oauth2client_query.go", + "oauth2client_update.go", + "offlinesession.go", + "offlinesession_create.go", + "offlinesession_delete.go", + "offlinesession_query.go", + "offlinesession_update.go", + "password.go", + "password_create.go", + "password_delete.go", + "password_query.go", + "password_update.go", + "refreshtoken.go", + "refreshtoken_create.go", + "refreshtoken_delete.go", + "refreshtoken_query.go", + "refreshtoken_update.go", + "runtime.go", + "tx.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db", + importpath = "github.com/dexidp/dex/storage/ent/db", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent", + "//vendor/entgo.io/ent/dialect", + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/entgo.io/ent/dialect/sql/sqlgraph", + "//vendor/entgo.io/ent/dialect/sql/sqljson", + "//vendor/entgo.io/ent/schema/field", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/dexidp/dex/storage/ent/db/authcode", + "//vendor/github.com/dexidp/dex/storage/ent/db/authrequest", + "//vendor/github.com/dexidp/dex/storage/ent/db/connector", + "//vendor/github.com/dexidp/dex/storage/ent/db/devicerequest", + "//vendor/github.com/dexidp/dex/storage/ent/db/devicetoken", + "//vendor/github.com/dexidp/dex/storage/ent/db/keys", + "//vendor/github.com/dexidp/dex/storage/ent/db/migrate", + "//vendor/github.com/dexidp/dex/storage/ent/db/oauth2client", + "//vendor/github.com/dexidp/dex/storage/ent/db/offlinesession", + "//vendor/github.com/dexidp/dex/storage/ent/db/password", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + "//vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken", + "//vendor/github.com/dexidp/dex/storage/ent/schema", + "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode.go new file mode 100644 index 00000000..841d0b8b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode.go @@ -0,0 +1,269 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/authcode" +) + +// AuthCode is the model entity for the AuthCode schema. +type AuthCode struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // ClientID holds the value of the "client_id" field. + ClientID string `json:"client_id,omitempty"` + // Scopes holds the value of the "scopes" field. + Scopes []string `json:"scopes,omitempty"` + // Nonce holds the value of the "nonce" field. + Nonce string `json:"nonce,omitempty"` + // RedirectURI holds the value of the "redirect_uri" field. + RedirectURI string `json:"redirect_uri,omitempty"` + // ClaimsUserID holds the value of the "claims_user_id" field. + ClaimsUserID string `json:"claims_user_id,omitempty"` + // ClaimsUsername holds the value of the "claims_username" field. + ClaimsUsername string `json:"claims_username,omitempty"` + // ClaimsEmail holds the value of the "claims_email" field. 
+ ClaimsEmail string `json:"claims_email,omitempty"` + // ClaimsEmailVerified holds the value of the "claims_email_verified" field. + ClaimsEmailVerified bool `json:"claims_email_verified,omitempty"` + // ClaimsGroups holds the value of the "claims_groups" field. + ClaimsGroups []string `json:"claims_groups,omitempty"` + // ClaimsPreferredUsername holds the value of the "claims_preferred_username" field. + ClaimsPreferredUsername string `json:"claims_preferred_username,omitempty"` + // ConnectorID holds the value of the "connector_id" field. + ConnectorID string `json:"connector_id,omitempty"` + // ConnectorData holds the value of the "connector_data" field. + ConnectorData *[]byte `json:"connector_data,omitempty"` + // Expiry holds the value of the "expiry" field. + Expiry time.Time `json:"expiry,omitempty"` + // CodeChallenge holds the value of the "code_challenge" field. + CodeChallenge string `json:"code_challenge,omitempty"` + // CodeChallengeMethod holds the value of the "code_challenge_method" field. + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AuthCode) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case authcode.FieldScopes, authcode.FieldClaimsGroups, authcode.FieldConnectorData: + values[i] = new([]byte) + case authcode.FieldClaimsEmailVerified: + values[i] = new(sql.NullBool) + case authcode.FieldID, authcode.FieldClientID, authcode.FieldNonce, authcode.FieldRedirectURI, authcode.FieldClaimsUserID, authcode.FieldClaimsUsername, authcode.FieldClaimsEmail, authcode.FieldClaimsPreferredUsername, authcode.FieldConnectorID, authcode.FieldCodeChallenge, authcode.FieldCodeChallengeMethod: + values[i] = new(sql.NullString) + case authcode.FieldExpiry: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AuthCode fields. 
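+// It returns an error when fewer values than columns were scanned.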
+func (ac *AuthCode) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case authcode.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + ac.ID = value.String + } + case authcode.FieldClientID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field client_id", values[i]) + } else if value.Valid { + ac.ClientID = value.String + } + case authcode.FieldScopes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field scopes", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &ac.Scopes); err != nil { + return fmt.Errorf("unmarshal field scopes: %w", err) + } + } + case authcode.FieldNonce: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field nonce", values[i]) + } else if value.Valid { + ac.Nonce = value.String + } + case authcode.FieldRedirectURI: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field redirect_uri", values[i]) + } else if value.Valid { + ac.RedirectURI = value.String + } + case authcode.FieldClaimsUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_user_id", values[i]) + } else if value.Valid { + ac.ClaimsUserID = value.String + } + case authcode.FieldClaimsUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_username", values[i]) + } else if value.Valid { + ac.ClaimsUsername = value.String + } + case authcode.FieldClaimsEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_email", values[i]) + } else if value.Valid { + ac.ClaimsEmail = value.String + } + case authcode.FieldClaimsEmailVerified: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field claims_email_verified", values[i]) + } else if value.Valid { + ac.ClaimsEmailVerified = value.Bool + } + case authcode.FieldClaimsGroups: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field claims_groups", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &ac.ClaimsGroups); err != nil { + return fmt.Errorf("unmarshal field claims_groups: %w", err) + } + } + case authcode.FieldClaimsPreferredUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_preferred_username", values[i]) + } else if value.Valid { + ac.ClaimsPreferredUsername = value.String + } + case authcode.FieldConnectorID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field connector_id", values[i]) + } else if value.Valid { + ac.ConnectorID = value.String + } + case authcode.FieldConnectorData: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field connector_data", values[i]) + } else if value != nil { + ac.ConnectorData = value + } + case authcode.FieldExpiry: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expiry", values[i]) + } else if value.Valid {
+ ac.Expiry = value.Time + } + case authcode.FieldCodeChallenge: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge", values[i]) + } else if value.Valid { + ac.CodeChallenge = value.String + } + case authcode.FieldCodeChallengeMethod: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge_method", values[i]) + } else if value.Valid { + ac.CodeChallengeMethod = value.String + } + default: + ac.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AuthCode. +// This includes values selected through modifiers, order, etc. +func (ac *AuthCode) Value(name string) (ent.Value, error) { + return ac.selectValues.Get(name) +} + +// Update returns a builder for updating this AuthCode. +// Note that you need to call AuthCode.Unwrap() before calling this method if this AuthCode +// was returned from a transaction, and the transaction was committed or rolled back. +func (ac *AuthCode) Update() *AuthCodeUpdateOne { + return NewAuthCodeClient(ac.config).UpdateOne(ac) +} + +// Unwrap unwraps the AuthCode entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (ac *AuthCode) Unwrap() *AuthCode { + _tx, ok := ac.config.driver.(*txDriver) + if !ok { + panic("db: AuthCode is not a transactional entity") + } + ac.config.driver = _tx.drv + return ac +} + +// String implements the fmt.Stringer. +func (ac *AuthCode) String() string { + var builder strings.Builder + builder.WriteString("AuthCode(") + builder.WriteString(fmt.Sprintf("id=%v, ", ac.ID)) + builder.WriteString("client_id=") + builder.WriteString(ac.ClientID) + builder.WriteString(", ") + builder.WriteString("scopes=") + builder.WriteString(fmt.Sprintf("%v", ac.Scopes)) + builder.WriteString(", ") + builder.WriteString("nonce=") + builder.WriteString(ac.Nonce) + builder.WriteString(", ") + builder.WriteString("redirect_uri=") + builder.WriteString(ac.RedirectURI) + builder.WriteString(", ") + builder.WriteString("claims_user_id=") + builder.WriteString(ac.ClaimsUserID) + builder.WriteString(", ") + builder.WriteString("claims_username=") + builder.WriteString(ac.ClaimsUsername) + builder.WriteString(", ") + builder.WriteString("claims_email=") + builder.WriteString(ac.ClaimsEmail) + builder.WriteString(", ") + builder.WriteString("claims_email_verified=") + builder.WriteString(fmt.Sprintf("%v", ac.ClaimsEmailVerified)) + builder.WriteString(", ") + builder.WriteString("claims_groups=") + builder.WriteString(fmt.Sprintf("%v", ac.ClaimsGroups)) + builder.WriteString(", ") + builder.WriteString("claims_preferred_username=") + builder.WriteString(ac.ClaimsPreferredUsername) + builder.WriteString(", ") + builder.WriteString("connector_id=") + builder.WriteString(ac.ConnectorID) + builder.WriteString(", ") + if v := ac.ConnectorData; v != nil { + builder.WriteString("connector_data=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("expiry=") + builder.WriteString(ac.Expiry.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("code_challenge=") + builder.WriteString(ac.CodeChallenge) + builder.WriteString(", ") + builder.WriteString("code_challenge_method=") + builder.WriteString(ac.CodeChallengeMethod) + builder.WriteByte(')') + return builder.String() +} +
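// Illustrative sketch (not part of the vendored files): how the generated
// AuthCode API above is typically driven — the composable predicates from
// authcode/where.go for querying, and Unwrap/Update for an entity returned
// from a transaction that has since been committed (see the doc comments on
// Update and Unwrap). This assumes a *db.Client constructed elsewhere, the
// usual generated SetExpiry setter on AuthCodeUpdateOne, and imports of
// context, time, and the db and authcode packages; touchAuthCodes and its
// parameters are hypothetical names, not part of the vendored code.
//
//	func touchAuthCodes(ctx context.Context, client *db.Client, clientID string) error {
//		tx, err := client.Tx(ctx)
//		if err != nil {
//			return err
//		}
//		// Compose generated predicates: same client_id AND not yet expired.
//		codes, err := tx.AuthCode.Query().
//			Where(authcode.And(
//				authcode.ClientID(clientID),
//				authcode.ExpiryGT(time.Now()),
//			)).
//			All(ctx)
//		if err != nil {
//			_ = tx.Rollback()
//			return err
//		}
//		if err := tx.Commit(); err != nil {
//			return err
//		}
//		for _, ac := range codes {
//			// The transaction is closed, so Unwrap before Update so the
//			// builder runs on the root driver rather than the dead tx.
//			if err := ac.Unwrap().Update().SetExpiry(time.Now()).Exec(ctx); err != nil {
//				return err
//			}
//		}
//		return nil
//	}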
+// AuthCodes is a parsable slice of AuthCode. +type AuthCodes []*AuthCode diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/BUILD new file mode 100644 index 00000000..ce2a629b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "authcode", + srcs = [ + "authcode.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/authcode", + importpath = "github.com/dexidp/dex/storage/ent/db/authcode", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode/authcode.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/authcode.go new file mode 100644 index 00000000..6e056f15 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/authcode.go @@ -0,0 +1,169 @@ +// Code generated by ent, DO NOT EDIT. + +package authcode + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the authcode type in the database. + Label = "auth_code" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldClientID holds the string denoting the client_id field in the database. + FieldClientID = "client_id" + // FieldScopes holds the string denoting the scopes field in the database. + FieldScopes = "scopes" + // FieldNonce holds the string denoting the nonce field in the database. + FieldNonce = "nonce" + // FieldRedirectURI holds the string denoting the redirect_uri field in the database. + FieldRedirectURI = "redirect_uri" + // FieldClaimsUserID holds the string denoting the claims_user_id field in the database. + FieldClaimsUserID = "claims_user_id" + // FieldClaimsUsername holds the string denoting the claims_username field in the database. + FieldClaimsUsername = "claims_username" + // FieldClaimsEmail holds the string denoting the claims_email field in the database. + FieldClaimsEmail = "claims_email" + // FieldClaimsEmailVerified holds the string denoting the claims_email_verified field in the database. + FieldClaimsEmailVerified = "claims_email_verified" + // FieldClaimsGroups holds the string denoting the claims_groups field in the database. + FieldClaimsGroups = "claims_groups" + // FieldClaimsPreferredUsername holds the string denoting the claims_preferred_username field in the database. + FieldClaimsPreferredUsername = "claims_preferred_username" + // FieldConnectorID holds the string denoting the connector_id field in the database. + FieldConnectorID = "connector_id" + // FieldConnectorData holds the string denoting the connector_data field in the database. + FieldConnectorData = "connector_data" + // FieldExpiry holds the string denoting the expiry field in the database. + FieldExpiry = "expiry" + // FieldCodeChallenge holds the string denoting the code_challenge field in the database. + FieldCodeChallenge = "code_challenge" + // FieldCodeChallengeMethod holds the string denoting the code_challenge_method field in the database. + FieldCodeChallengeMethod = "code_challenge_method" + // Table holds the table name of the authcode in the database. + Table = "auth_codes" +) + +// Columns holds all SQL columns for authcode fields. 
+var Columns = []string{ + FieldID, + FieldClientID, + FieldScopes, + FieldNonce, + FieldRedirectURI, + FieldClaimsUserID, + FieldClaimsUsername, + FieldClaimsEmail, + FieldClaimsEmailVerified, + FieldClaimsGroups, + FieldClaimsPreferredUsername, + FieldConnectorID, + FieldConnectorData, + FieldExpiry, + FieldCodeChallenge, + FieldCodeChallengeMethod, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save. + ClientIDValidator func(string) error + // NonceValidator is a validator for the "nonce" field. It is called by the builders before save. + NonceValidator func(string) error + // RedirectURIValidator is a validator for the "redirect_uri" field. It is called by the builders before save. + RedirectURIValidator func(string) error + // ClaimsUserIDValidator is a validator for the "claims_user_id" field. It is called by the builders before save. + ClaimsUserIDValidator func(string) error + // ClaimsUsernameValidator is a validator for the "claims_username" field. It is called by the builders before save. + ClaimsUsernameValidator func(string) error + // ClaimsEmailValidator is a validator for the "claims_email" field. It is called by the builders before save. + ClaimsEmailValidator func(string) error + // DefaultClaimsPreferredUsername holds the default value on creation for the "claims_preferred_username" field. + DefaultClaimsPreferredUsername string + // ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + ConnectorIDValidator func(string) error + // DefaultCodeChallenge holds the default value on creation for the "code_challenge" field. + DefaultCodeChallenge string + // DefaultCodeChallengeMethod holds the default value on creation for the "code_challenge_method" field. + DefaultCodeChallengeMethod string + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the AuthCode queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByClientID orders the results by the client_id field. +func ByClientID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientID, opts...).ToFunc() +} + +// ByNonce orders the results by the nonce field. +func ByNonce(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNonce, opts...).ToFunc() +} + +// ByRedirectURI orders the results by the redirect_uri field. +func ByRedirectURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRedirectURI, opts...).ToFunc() +} + +// ByClaimsUserID orders the results by the claims_user_id field. +func ByClaimsUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUserID, opts...).ToFunc() +} + +// ByClaimsUsername orders the results by the claims_username field. +func ByClaimsUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUsername, opts...).ToFunc() +} + +// ByClaimsEmail orders the results by the claims_email field. 
+func ByClaimsEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmail, opts...).ToFunc() +} + +// ByClaimsEmailVerified orders the results by the claims_email_verified field. +func ByClaimsEmailVerified(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmailVerified, opts...).ToFunc() +} + +// ByClaimsPreferredUsername orders the results by the claims_preferred_username field. +func ByClaimsPreferredUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsPreferredUsername, opts...).ToFunc() +} + +// ByConnectorID orders the results by the connector_id field. +func ByConnectorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConnectorID, opts...).ToFunc() +} + +// ByExpiry orders the results by the expiry field. +func ByExpiry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiry, opts...).ToFunc() +} + +// ByCodeChallenge orders the results by the code_challenge field. +func ByCodeChallenge(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallenge, opts...).ToFunc() +} + +// ByCodeChallengeMethod orders the results by the code_challenge_method field. +func ByCodeChallengeMethod(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallengeMethod, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/where.go new file mode 100644 index 00000000..87f1f6e6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode/where.go @@ -0,0 +1,932 @@ +// Code generated by ent, DO NOT EDIT. + +package authcode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. 
+func IDContainsFold(id string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldID, id)) +} + +// ClientID applies equality check predicate on the "client_id" field. It's identical to ClientIDEQ. +func ClientID(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClientID, v)) +} + +// Nonce applies equality check predicate on the "nonce" field. It's identical to NonceEQ. +func Nonce(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldNonce, v)) +} + +// RedirectURI applies equality check predicate on the "redirect_uri" field. It's identical to RedirectURIEQ. +func RedirectURI(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldRedirectURI, v)) +} + +// ClaimsUserID applies equality check predicate on the "claims_user_id" field. It's identical to ClaimsUserIDEQ. +func ClaimsUserID(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUsername applies equality check predicate on the "claims_username" field. It's identical to ClaimsUsernameEQ. +func ClaimsUsername(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsEmail applies equality check predicate on the "claims_email" field. It's identical to ClaimsEmailEQ. +func ClaimsEmail(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerified applies equality check predicate on the "claims_email_verified" field. It's identical to ClaimsEmailVerifiedEQ. +func ClaimsEmailVerified(v bool) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsPreferredUsername applies equality check predicate on the "claims_preferred_username" field. It's identical to ClaimsPreferredUsernameEQ. +func ClaimsPreferredUsername(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ConnectorID applies equality check predicate on the "connector_id" field. It's identical to ConnectorIDEQ. +func ConnectorID(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorData applies equality check predicate on the "connector_data" field. It's identical to ConnectorDataEQ. +func ConnectorData(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldConnectorData, v)) +} + +// Expiry applies equality check predicate on the "expiry" field. It's identical to ExpiryEQ. +func Expiry(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldExpiry, v)) +} + +// CodeChallenge applies equality check predicate on the "code_challenge" field. It's identical to CodeChallengeEQ. +func CodeChallenge(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeMethod applies equality check predicate on the "code_challenge_method" field. It's identical to CodeChallengeMethodEQ. +func CodeChallengeMethod(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// ClientIDEQ applies the EQ predicate on the "client_id" field. +func ClientIDEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClientID, v)) +} + +// ClientIDNEQ applies the NEQ predicate on the "client_id" field. 
+func ClientIDNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClientID, v)) +} + +// ClientIDIn applies the In predicate on the "client_id" field. +func ClientIDIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldClientID, vs...)) +} + +// ClientIDNotIn applies the NotIn predicate on the "client_id" field. +func ClientIDNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldClientID, vs...)) +} + +// ClientIDGT applies the GT predicate on the "client_id" field. +func ClientIDGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldClientID, v)) +} + +// ClientIDGTE applies the GTE predicate on the "client_id" field. +func ClientIDGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldClientID, v)) +} + +// ClientIDLT applies the LT predicate on the "client_id" field. +func ClientIDLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldClientID, v)) +} + +// ClientIDLTE applies the LTE predicate on the "client_id" field. +func ClientIDLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldClientID, v)) +} + +// ClientIDContains applies the Contains predicate on the "client_id" field. +func ClientIDContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldClientID, v)) +} + +// ClientIDHasPrefix applies the HasPrefix predicate on the "client_id" field. +func ClientIDHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldClientID, v)) +} + +// ClientIDHasSuffix applies the HasSuffix predicate on the "client_id" field. +func ClientIDHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldClientID, v)) +} + +// ClientIDEqualFold applies the EqualFold predicate on the "client_id" field. +func ClientIDEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldClientID, v)) +} + +// ClientIDContainsFold applies the ContainsFold predicate on the "client_id" field. +func ClientIDContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldClientID, v)) +} + +// ScopesIsNil applies the IsNil predicate on the "scopes" field. +func ScopesIsNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldIsNull(FieldScopes)) +} + +// ScopesNotNil applies the NotNil predicate on the "scopes" field. +func ScopesNotNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotNull(FieldScopes)) +} + +// NonceEQ applies the EQ predicate on the "nonce" field. +func NonceEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldNonce, v)) +} + +// NonceNEQ applies the NEQ predicate on the "nonce" field. +func NonceNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldNonce, v)) +} + +// NonceIn applies the In predicate on the "nonce" field. +func NonceIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldNonce, vs...)) +} + +// NonceNotIn applies the NotIn predicate on the "nonce" field. +func NonceNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldNonce, vs...)) +} + +// NonceGT applies the GT predicate on the "nonce" field. +func NonceGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldNonce, v)) +} + +// NonceGTE applies the GTE predicate on the "nonce" field. 
+func NonceGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldNonce, v)) +} + +// NonceLT applies the LT predicate on the "nonce" field. +func NonceLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldNonce, v)) +} + +// NonceLTE applies the LTE predicate on the "nonce" field. +func NonceLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldNonce, v)) +} + +// NonceContains applies the Contains predicate on the "nonce" field. +func NonceContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldNonce, v)) +} + +// NonceHasPrefix applies the HasPrefix predicate on the "nonce" field. +func NonceHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldNonce, v)) +} + +// NonceHasSuffix applies the HasSuffix predicate on the "nonce" field. +func NonceHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldNonce, v)) +} + +// NonceEqualFold applies the EqualFold predicate on the "nonce" field. +func NonceEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldNonce, v)) +} + +// NonceContainsFold applies the ContainsFold predicate on the "nonce" field. +func NonceContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldNonce, v)) +} + +// RedirectURIEQ applies the EQ predicate on the "redirect_uri" field. +func RedirectURIEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldRedirectURI, v)) +} + +// RedirectURINEQ applies the NEQ predicate on the "redirect_uri" field. +func RedirectURINEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldRedirectURI, v)) +} + +// RedirectURIIn applies the In predicate on the "redirect_uri" field. +func RedirectURIIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldRedirectURI, vs...)) +} + +// RedirectURINotIn applies the NotIn predicate on the "redirect_uri" field. +func RedirectURINotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldRedirectURI, vs...)) +} + +// RedirectURIGT applies the GT predicate on the "redirect_uri" field. +func RedirectURIGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldRedirectURI, v)) +} + +// RedirectURIGTE applies the GTE predicate on the "redirect_uri" field. +func RedirectURIGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldRedirectURI, v)) +} + +// RedirectURILT applies the LT predicate on the "redirect_uri" field. +func RedirectURILT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldRedirectURI, v)) +} + +// RedirectURILTE applies the LTE predicate on the "redirect_uri" field. +func RedirectURILTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldRedirectURI, v)) +} + +// RedirectURIContains applies the Contains predicate on the "redirect_uri" field. +func RedirectURIContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldRedirectURI, v)) +} + +// RedirectURIHasPrefix applies the HasPrefix predicate on the "redirect_uri" field. +func RedirectURIHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldRedirectURI, v)) +} + +// RedirectURIHasSuffix applies the HasSuffix predicate on the "redirect_uri" field. 
+func RedirectURIHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldRedirectURI, v)) +} + +// RedirectURIEqualFold applies the EqualFold predicate on the "redirect_uri" field. +func RedirectURIEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldRedirectURI, v)) +} + +// RedirectURIContainsFold applies the ContainsFold predicate on the "redirect_uri" field. +func RedirectURIContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldRedirectURI, v)) +} + +// ClaimsUserIDEQ applies the EQ predicate on the "claims_user_id" field. +func ClaimsUserIDEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDNEQ applies the NEQ predicate on the "claims_user_id" field. +func ClaimsUserIDNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDIn applies the In predicate on the "claims_user_id" field. +func ClaimsUserIDIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDNotIn applies the NotIn predicate on the "claims_user_id" field. +func ClaimsUserIDNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDGT applies the GT predicate on the "claims_user_id" field. +func ClaimsUserIDGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDGTE applies the GTE predicate on the "claims_user_id" field. +func ClaimsUserIDGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLT applies the LT predicate on the "claims_user_id" field. +func ClaimsUserIDLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLTE applies the LTE predicate on the "claims_user_id" field. +func ClaimsUserIDLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContains applies the Contains predicate on the "claims_user_id" field. +func ClaimsUserIDContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasPrefix applies the HasPrefix predicate on the "claims_user_id" field. +func ClaimsUserIDHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasSuffix applies the HasSuffix predicate on the "claims_user_id" field. +func ClaimsUserIDHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDEqualFold applies the EqualFold predicate on the "claims_user_id" field. +func ClaimsUserIDEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContainsFold applies the ContainsFold predicate on the "claims_user_id" field. +func ClaimsUserIDContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldClaimsUserID, v)) +} + +// ClaimsUsernameEQ applies the EQ predicate on the "claims_username" field. 
+func ClaimsUsernameEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameNEQ applies the NEQ predicate on the "claims_username" field. +func ClaimsUsernameNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameIn applies the In predicate on the "claims_username" field. +func ClaimsUsernameIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameNotIn applies the NotIn predicate on the "claims_username" field. +func ClaimsUsernameNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameGT applies the GT predicate on the "claims_username" field. +func ClaimsUsernameGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameGTE applies the GTE predicate on the "claims_username" field. +func ClaimsUsernameGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLT applies the LT predicate on the "claims_username" field. +func ClaimsUsernameLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLTE applies the LTE predicate on the "claims_username" field. +func ClaimsUsernameLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContains applies the Contains predicate on the "claims_username" field. +func ClaimsUsernameContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasPrefix applies the HasPrefix predicate on the "claims_username" field. +func ClaimsUsernameHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasSuffix applies the HasSuffix predicate on the "claims_username" field. +func ClaimsUsernameHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameEqualFold applies the EqualFold predicate on the "claims_username" field. +func ClaimsUsernameEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContainsFold applies the ContainsFold predicate on the "claims_username" field. +func ClaimsUsernameContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldClaimsUsername, v)) +} + +// ClaimsEmailEQ applies the EQ predicate on the "claims_email" field. +func ClaimsEmailEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailNEQ applies the NEQ predicate on the "claims_email" field. +func ClaimsEmailNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailIn applies the In predicate on the "claims_email" field. +func ClaimsEmailIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailNotIn applies the NotIn predicate on the "claims_email" field. 
+func ClaimsEmailNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailGT applies the GT predicate on the "claims_email" field. +func ClaimsEmailGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldClaimsEmail, v)) +} + +// ClaimsEmailGTE applies the GTE predicate on the "claims_email" field. +func ClaimsEmailGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailLT applies the LT predicate on the "claims_email" field. +func ClaimsEmailLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldClaimsEmail, v)) +} + +// ClaimsEmailLTE applies the LTE predicate on the "claims_email" field. +func ClaimsEmailLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailContains applies the Contains predicate on the "claims_email" field. +func ClaimsEmailContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasPrefix applies the HasPrefix predicate on the "claims_email" field. +func ClaimsEmailHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasSuffix applies the HasSuffix predicate on the "claims_email" field. +func ClaimsEmailHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldClaimsEmail, v)) +} + +// ClaimsEmailEqualFold applies the EqualFold predicate on the "claims_email" field. +func ClaimsEmailEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailContainsFold applies the ContainsFold predicate on the "claims_email" field. +func ClaimsEmailContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerifiedEQ applies the EQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedEQ(v bool) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsEmailVerifiedNEQ applies the NEQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedNEQ(v bool) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsGroupsIsNil applies the IsNil predicate on the "claims_groups" field. +func ClaimsGroupsIsNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldIsNull(FieldClaimsGroups)) +} + +// ClaimsGroupsNotNil applies the NotNil predicate on the "claims_groups" field. +func ClaimsGroupsNotNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotNull(FieldClaimsGroups)) +} + +// ClaimsPreferredUsernameEQ applies the EQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameNEQ applies the NEQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameIn applies the In predicate on the "claims_preferred_username" field. 
+func ClaimsPreferredUsernameIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameNotIn applies the NotIn predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameGT applies the GT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameGTE applies the GTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLT applies the LT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLTE applies the LTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContains applies the Contains predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasPrefix applies the HasPrefix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasSuffix applies the HasSuffix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameEqualFold applies the EqualFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContainsFold applies the ContainsFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldClaimsPreferredUsername, v)) +} + +// ConnectorIDEQ applies the EQ predicate on the "connector_id" field. +func ConnectorIDEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorIDNEQ applies the NEQ predicate on the "connector_id" field. +func ConnectorIDNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldConnectorID, v)) +} + +// ConnectorIDIn applies the In predicate on the "connector_id" field. +func ConnectorIDIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldConnectorID, vs...)) +} + +// ConnectorIDNotIn applies the NotIn predicate on the "connector_id" field. 
+func ConnectorIDNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldConnectorID, vs...)) +} + +// ConnectorIDGT applies the GT predicate on the "connector_id" field. +func ConnectorIDGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldConnectorID, v)) +} + +// ConnectorIDGTE applies the GTE predicate on the "connector_id" field. +func ConnectorIDGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldConnectorID, v)) +} + +// ConnectorIDLT applies the LT predicate on the "connector_id" field. +func ConnectorIDLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldConnectorID, v)) +} + +// ConnectorIDLTE applies the LTE predicate on the "connector_id" field. +func ConnectorIDLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldConnectorID, v)) +} + +// ConnectorIDContains applies the Contains predicate on the "connector_id" field. +func ConnectorIDContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldConnectorID, v)) +} + +// ConnectorIDHasPrefix applies the HasPrefix predicate on the "connector_id" field. +func ConnectorIDHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldConnectorID, v)) +} + +// ConnectorIDHasSuffix applies the HasSuffix predicate on the "connector_id" field. +func ConnectorIDHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldConnectorID, v)) +} + +// ConnectorIDEqualFold applies the EqualFold predicate on the "connector_id" field. +func ConnectorIDEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldConnectorID, v)) +} + +// ConnectorIDContainsFold applies the ContainsFold predicate on the "connector_id" field. +func ConnectorIDContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldConnectorID, v)) +} + +// ConnectorDataEQ applies the EQ predicate on the "connector_data" field. +func ConnectorDataEQ(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldConnectorData, v)) +} + +// ConnectorDataNEQ applies the NEQ predicate on the "connector_data" field. +func ConnectorDataNEQ(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldConnectorData, v)) +} + +// ConnectorDataIn applies the In predicate on the "connector_data" field. +func ConnectorDataIn(vs ...[]byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldConnectorData, vs...)) +} + +// ConnectorDataNotIn applies the NotIn predicate on the "connector_data" field. +func ConnectorDataNotIn(vs ...[]byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldConnectorData, vs...)) +} + +// ConnectorDataGT applies the GT predicate on the "connector_data" field. +func ConnectorDataGT(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldConnectorData, v)) +} + +// ConnectorDataGTE applies the GTE predicate on the "connector_data" field. +func ConnectorDataGTE(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldConnectorData, v)) +} + +// ConnectorDataLT applies the LT predicate on the "connector_data" field. +func ConnectorDataLT(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldConnectorData, v)) +} + +// ConnectorDataLTE applies the LTE predicate on the "connector_data" field. 
+func ConnectorDataLTE(v []byte) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldConnectorData, v)) +} + +// ConnectorDataIsNil applies the IsNil predicate on the "connector_data" field. +func ConnectorDataIsNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldIsNull(FieldConnectorData)) +} + +// ConnectorDataNotNil applies the NotNil predicate on the "connector_data" field. +func ConnectorDataNotNil() predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotNull(FieldConnectorData)) +} + +// ExpiryEQ applies the EQ predicate on the "expiry" field. +func ExpiryEQ(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldExpiry, v)) +} + +// ExpiryNEQ applies the NEQ predicate on the "expiry" field. +func ExpiryNEQ(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldExpiry, v)) +} + +// ExpiryIn applies the In predicate on the "expiry" field. +func ExpiryIn(vs ...time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldExpiry, vs...)) +} + +// ExpiryNotIn applies the NotIn predicate on the "expiry" field. +func ExpiryNotIn(vs ...time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldExpiry, vs...)) +} + +// ExpiryGT applies the GT predicate on the "expiry" field. +func ExpiryGT(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldExpiry, v)) +} + +// ExpiryGTE applies the GTE predicate on the "expiry" field. +func ExpiryGTE(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldExpiry, v)) +} + +// ExpiryLT applies the LT predicate on the "expiry" field. +func ExpiryLT(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldExpiry, v)) +} + +// ExpiryLTE applies the LTE predicate on the "expiry" field. +func ExpiryLTE(v time.Time) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldExpiry, v)) +} + +// CodeChallengeEQ applies the EQ predicate on the "code_challenge" field. +func CodeChallengeEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeNEQ applies the NEQ predicate on the "code_challenge" field. +func CodeChallengeNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeIn applies the In predicate on the "code_challenge" field. +func CodeChallengeIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeNotIn applies the NotIn predicate on the "code_challenge" field. +func CodeChallengeNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeGT applies the GT predicate on the "code_challenge" field. +func CodeChallengeGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldCodeChallenge, v)) +} + +// CodeChallengeGTE applies the GTE predicate on the "code_challenge" field. +func CodeChallengeGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldCodeChallenge, v)) +} + +// CodeChallengeLT applies the LT predicate on the "code_challenge" field. +func CodeChallengeLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldCodeChallenge, v)) +} + +// CodeChallengeLTE applies the LTE predicate on the "code_challenge" field. 
+func CodeChallengeLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldCodeChallenge, v)) +} + +// CodeChallengeContains applies the Contains predicate on the "code_challenge" field. +func CodeChallengeContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldCodeChallenge, v)) +} + +// CodeChallengeHasPrefix applies the HasPrefix predicate on the "code_challenge" field. +func CodeChallengeHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldCodeChallenge, v)) +} + +// CodeChallengeHasSuffix applies the HasSuffix predicate on the "code_challenge" field. +func CodeChallengeHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldCodeChallenge, v)) +} + +// CodeChallengeEqualFold applies the EqualFold predicate on the "code_challenge" field. +func CodeChallengeEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldCodeChallenge, v)) +} + +// CodeChallengeContainsFold applies the ContainsFold predicate on the "code_challenge" field. +func CodeChallengeContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldCodeChallenge, v)) +} + +// CodeChallengeMethodEQ applies the EQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodNEQ applies the NEQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodNEQ(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodIn applies the In predicate on the "code_challenge_method" field. +func CodeChallengeMethodIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodNotIn applies the NotIn predicate on the "code_challenge_method" field. +func CodeChallengeMethodNotIn(vs ...string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldNotIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodGT applies the GT predicate on the "code_challenge_method" field. +func CodeChallengeMethodGT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodGTE applies the GTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodGTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldGTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLT applies the LT predicate on the "code_challenge_method" field. +func CodeChallengeMethodLT(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLTE applies the LTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodLTE(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldLTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContains applies the Contains predicate on the "code_challenge_method" field. +func CodeChallengeMethodContains(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContains(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasPrefix applies the HasPrefix predicate on the "code_challenge_method" field. 
+func CodeChallengeMethodHasPrefix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasPrefix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasSuffix applies the HasSuffix predicate on the "code_challenge_method" field. +func CodeChallengeMethodHasSuffix(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldHasSuffix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodEqualFold applies the EqualFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodEqualFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldEqualFold(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContainsFold applies the ContainsFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodContainsFold(v string) predicate.AuthCode { + return predicate.AuthCode(sql.FieldContainsFold(FieldCodeChallengeMethod, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AuthCode) predicate.AuthCode { + return predicate.AuthCode(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AuthCode) predicate.AuthCode { + return predicate.AuthCode(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AuthCode) predicate.AuthCode { + return predicate.AuthCode(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_create.go new file mode 100644 index 00000000..2441757b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_create.go @@ -0,0 +1,446 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authcode" +) + +// AuthCodeCreate is the builder for creating a AuthCode entity. +type AuthCodeCreate struct { + config + mutation *AuthCodeMutation + hooks []Hook +} + +// SetClientID sets the "client_id" field. +func (acc *AuthCodeCreate) SetClientID(s string) *AuthCodeCreate { + acc.mutation.SetClientID(s) + return acc +} + +// SetScopes sets the "scopes" field. +func (acc *AuthCodeCreate) SetScopes(s []string) *AuthCodeCreate { + acc.mutation.SetScopes(s) + return acc +} + +// SetNonce sets the "nonce" field. +func (acc *AuthCodeCreate) SetNonce(s string) *AuthCodeCreate { + acc.mutation.SetNonce(s) + return acc +} + +// SetRedirectURI sets the "redirect_uri" field. +func (acc *AuthCodeCreate) SetRedirectURI(s string) *AuthCodeCreate { + acc.mutation.SetRedirectURI(s) + return acc +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (acc *AuthCodeCreate) SetClaimsUserID(s string) *AuthCodeCreate { + acc.mutation.SetClaimsUserID(s) + return acc +} + +// SetClaimsUsername sets the "claims_username" field. +func (acc *AuthCodeCreate) SetClaimsUsername(s string) *AuthCodeCreate { + acc.mutation.SetClaimsUsername(s) + return acc +} + +// SetClaimsEmail sets the "claims_email" field. 
+func (acc *AuthCodeCreate) SetClaimsEmail(s string) *AuthCodeCreate { + acc.mutation.SetClaimsEmail(s) + return acc +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (acc *AuthCodeCreate) SetClaimsEmailVerified(b bool) *AuthCodeCreate { + acc.mutation.SetClaimsEmailVerified(b) + return acc +} + +// SetClaimsGroups sets the "claims_groups" field. +func (acc *AuthCodeCreate) SetClaimsGroups(s []string) *AuthCodeCreate { + acc.mutation.SetClaimsGroups(s) + return acc +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (acc *AuthCodeCreate) SetClaimsPreferredUsername(s string) *AuthCodeCreate { + acc.mutation.SetClaimsPreferredUsername(s) + return acc +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (acc *AuthCodeCreate) SetNillableClaimsPreferredUsername(s *string) *AuthCodeCreate { + if s != nil { + acc.SetClaimsPreferredUsername(*s) + } + return acc +} + +// SetConnectorID sets the "connector_id" field. +func (acc *AuthCodeCreate) SetConnectorID(s string) *AuthCodeCreate { + acc.mutation.SetConnectorID(s) + return acc +} + +// SetConnectorData sets the "connector_data" field. +func (acc *AuthCodeCreate) SetConnectorData(b []byte) *AuthCodeCreate { + acc.mutation.SetConnectorData(b) + return acc +} + +// SetExpiry sets the "expiry" field. +func (acc *AuthCodeCreate) SetExpiry(t time.Time) *AuthCodeCreate { + acc.mutation.SetExpiry(t) + return acc +} + +// SetCodeChallenge sets the "code_challenge" field. +func (acc *AuthCodeCreate) SetCodeChallenge(s string) *AuthCodeCreate { + acc.mutation.SetCodeChallenge(s) + return acc +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (acc *AuthCodeCreate) SetNillableCodeChallenge(s *string) *AuthCodeCreate { + if s != nil { + acc.SetCodeChallenge(*s) + } + return acc +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (acc *AuthCodeCreate) SetCodeChallengeMethod(s string) *AuthCodeCreate { + acc.mutation.SetCodeChallengeMethod(s) + return acc +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (acc *AuthCodeCreate) SetNillableCodeChallengeMethod(s *string) *AuthCodeCreate { + if s != nil { + acc.SetCodeChallengeMethod(*s) + } + return acc +} + +// SetID sets the "id" field. +func (acc *AuthCodeCreate) SetID(s string) *AuthCodeCreate { + acc.mutation.SetID(s) + return acc +} + +// Mutation returns the AuthCodeMutation object of the builder. +func (acc *AuthCodeCreate) Mutation() *AuthCodeMutation { + return acc.mutation +} + +// Save creates the AuthCode in the database. +func (acc *AuthCodeCreate) Save(ctx context.Context) (*AuthCode, error) { + acc.defaults() + return withHooks(ctx, acc.sqlSave, acc.mutation, acc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (acc *AuthCodeCreate) SaveX(ctx context.Context) *AuthCode { + v, err := acc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (acc *AuthCodeCreate) Exec(ctx context.Context) error { + _, err := acc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (acc *AuthCodeCreate) ExecX(ctx context.Context) { + if err := acc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (acc *AuthCodeCreate) defaults() { + if _, ok := acc.mutation.ClaimsPreferredUsername(); !ok { + v := authcode.DefaultClaimsPreferredUsername + acc.mutation.SetClaimsPreferredUsername(v) + } + if _, ok := acc.mutation.CodeChallenge(); !ok { + v := authcode.DefaultCodeChallenge + acc.mutation.SetCodeChallenge(v) + } + if _, ok := acc.mutation.CodeChallengeMethod(); !ok { + v := authcode.DefaultCodeChallengeMethod + acc.mutation.SetCodeChallengeMethod(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (acc *AuthCodeCreate) check() error { + if _, ok := acc.mutation.ClientID(); !ok { + return &ValidationError{Name: "client_id", err: errors.New(`db: missing required field "AuthCode.client_id"`)} + } + if v, ok := acc.mutation.ClientID(); ok { + if err := authcode.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.client_id": %w`, err)} + } + } + if _, ok := acc.mutation.Nonce(); !ok { + return &ValidationError{Name: "nonce", err: errors.New(`db: missing required field "AuthCode.nonce"`)} + } + if v, ok := acc.mutation.Nonce(); ok { + if err := authcode.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "AuthCode.nonce": %w`, err)} + } + } + if _, ok := acc.mutation.RedirectURI(); !ok { + return &ValidationError{Name: "redirect_uri", err: errors.New(`db: missing required field "AuthCode.redirect_uri"`)} + } + if v, ok := acc.mutation.RedirectURI(); ok { + if err := authcode.RedirectURIValidator(v); err != nil { + return &ValidationError{Name: "redirect_uri", err: fmt.Errorf(`db: validator failed for field "AuthCode.redirect_uri": %w`, err)} + } + } + if _, ok := acc.mutation.ClaimsUserID(); !ok { + return &ValidationError{Name: "claims_user_id", err: errors.New(`db: missing required field "AuthCode.claims_user_id"`)} + } + if v, ok := acc.mutation.ClaimsUserID(); ok { + if err := authcode.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_user_id": %w`, err)} + } + } + if _, ok := acc.mutation.ClaimsUsername(); !ok { + return &ValidationError{Name: "claims_username", err: errors.New(`db: missing required field "AuthCode.claims_username"`)} + } + if v, ok := acc.mutation.ClaimsUsername(); ok { + if err := authcode.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_username": %w`, err)} + } + } + if _, ok := acc.mutation.ClaimsEmail(); !ok { + return &ValidationError{Name: "claims_email", err: errors.New(`db: missing required field "AuthCode.claims_email"`)} + } + if v, ok := acc.mutation.ClaimsEmail(); ok { + if err := authcode.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_email": %w`, err)} + } + } + if _, ok := acc.mutation.ClaimsEmailVerified(); !ok { + return &ValidationError{Name: "claims_email_verified", err: errors.New(`db: missing required field "AuthCode.claims_email_verified"`)} + } + if _, ok := acc.mutation.ClaimsPreferredUsername(); !ok { + return &ValidationError{Name: "claims_preferred_username", err: errors.New(`db: missing required field "AuthCode.claims_preferred_username"`)} + } + if _, ok := acc.mutation.ConnectorID(); !ok { + return &ValidationError{Name: "connector_id", 
err: errors.New(`db: missing required field "AuthCode.connector_id"`)} + } + if v, ok := acc.mutation.ConnectorID(); ok { + if err := authcode.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.connector_id": %w`, err)} + } + } + if _, ok := acc.mutation.Expiry(); !ok { + return &ValidationError{Name: "expiry", err: errors.New(`db: missing required field "AuthCode.expiry"`)} + } + if _, ok := acc.mutation.CodeChallenge(); !ok { + return &ValidationError{Name: "code_challenge", err: errors.New(`db: missing required field "AuthCode.code_challenge"`)} + } + if _, ok := acc.mutation.CodeChallengeMethod(); !ok { + return &ValidationError{Name: "code_challenge_method", err: errors.New(`db: missing required field "AuthCode.code_challenge_method"`)} + } + if v, ok := acc.mutation.ID(); ok { + if err := authcode.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "AuthCode.id": %w`, err)} + } + } + return nil +} + +func (acc *AuthCodeCreate) sqlSave(ctx context.Context) (*AuthCode, error) { + if err := acc.check(); err != nil { + return nil, err + } + _node, _spec := acc.createSpec() + if err := sqlgraph.CreateNode(ctx, acc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected AuthCode.ID type: %T", _spec.ID.Value) + } + } + acc.mutation.id = &_node.ID + acc.mutation.done = true + return _node, nil +} + +func (acc *AuthCodeCreate) createSpec() (*AuthCode, *sqlgraph.CreateSpec) { + var ( + _node = &AuthCode{config: acc.config} + _spec = sqlgraph.NewCreateSpec(authcode.Table, sqlgraph.NewFieldSpec(authcode.FieldID, field.TypeString)) + ) + if id, ok := acc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := acc.mutation.ClientID(); ok { + _spec.SetField(authcode.FieldClientID, field.TypeString, value) + _node.ClientID = value + } + if value, ok := acc.mutation.Scopes(); ok { + _spec.SetField(authcode.FieldScopes, field.TypeJSON, value) + _node.Scopes = value + } + if value, ok := acc.mutation.Nonce(); ok { + _spec.SetField(authcode.FieldNonce, field.TypeString, value) + _node.Nonce = value + } + if value, ok := acc.mutation.RedirectURI(); ok { + _spec.SetField(authcode.FieldRedirectURI, field.TypeString, value) + _node.RedirectURI = value + } + if value, ok := acc.mutation.ClaimsUserID(); ok { + _spec.SetField(authcode.FieldClaimsUserID, field.TypeString, value) + _node.ClaimsUserID = value + } + if value, ok := acc.mutation.ClaimsUsername(); ok { + _spec.SetField(authcode.FieldClaimsUsername, field.TypeString, value) + _node.ClaimsUsername = value + } + if value, ok := acc.mutation.ClaimsEmail(); ok { + _spec.SetField(authcode.FieldClaimsEmail, field.TypeString, value) + _node.ClaimsEmail = value + } + if value, ok := acc.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authcode.FieldClaimsEmailVerified, field.TypeBool, value) + _node.ClaimsEmailVerified = value + } + if value, ok := acc.mutation.ClaimsGroups(); ok { + _spec.SetField(authcode.FieldClaimsGroups, field.TypeJSON, value) + _node.ClaimsGroups = value + } + if value, ok := acc.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authcode.FieldClaimsPreferredUsername, field.TypeString, value) + 
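+		// As with the fields above, the value is mirrored onto the returned
+		// node so the caller observes exactly what was sent to the database.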
_node.ClaimsPreferredUsername = value + } + if value, ok := acc.mutation.ConnectorID(); ok { + _spec.SetField(authcode.FieldConnectorID, field.TypeString, value) + _node.ConnectorID = value + } + if value, ok := acc.mutation.ConnectorData(); ok { + _spec.SetField(authcode.FieldConnectorData, field.TypeBytes, value) + _node.ConnectorData = &value + } + if value, ok := acc.mutation.Expiry(); ok { + _spec.SetField(authcode.FieldExpiry, field.TypeTime, value) + _node.Expiry = value + } + if value, ok := acc.mutation.CodeChallenge(); ok { + _spec.SetField(authcode.FieldCodeChallenge, field.TypeString, value) + _node.CodeChallenge = value + } + if value, ok := acc.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authcode.FieldCodeChallengeMethod, field.TypeString, value) + _node.CodeChallengeMethod = value + } + return _node, _spec +} + +// AuthCodeCreateBulk is the builder for creating many AuthCode entities in bulk. +type AuthCodeCreateBulk struct { + config + builders []*AuthCodeCreate +} + +// Save creates the AuthCode entities in the database. +func (accb *AuthCodeCreateBulk) Save(ctx context.Context) ([]*AuthCode, error) { + specs := make([]*sqlgraph.CreateSpec, len(accb.builders)) + nodes := make([]*AuthCode, len(accb.builders)) + mutators := make([]Mutator, len(accb.builders)) + for i := range accb.builders { + func(i int, root context.Context) { + builder := accb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AuthCodeMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, accb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, accb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, accb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (accb *AuthCodeCreateBulk) SaveX(ctx context.Context) []*AuthCode { + v, err := accb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (accb *AuthCodeCreateBulk) Exec(ctx context.Context) error { + _, err := accb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (accb *AuthCodeCreateBulk) ExecX(ctx context.Context) { + if err := accb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_delete.go new file mode 100644 index 00000000..1f758fcc --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
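+
+// An illustrative sketch, not part of the generated API: the delete builder
+// below is normally obtained through the generated client. Assuming a
+// *Client value named "client" and the generated ExpiryLT predicate on the
+// "expiry" field, expired codes could be purged with:
+//
+//	n, err := client.AuthCode.Delete().
+//		Where(authcode.ExpiryLT(time.Now())).
+//		Exec(ctx)
+//
+// Exec reports how many rows were deleted; AuthCodeDeleteOne wraps the same
+// builder and converts a zero-row result into a *NotFoundError.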
+ +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authcode" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthCodeDelete is the builder for deleting a AuthCode entity. +type AuthCodeDelete struct { + config + hooks []Hook + mutation *AuthCodeMutation +} + +// Where appends a list predicates to the AuthCodeDelete builder. +func (acd *AuthCodeDelete) Where(ps ...predicate.AuthCode) *AuthCodeDelete { + acd.mutation.Where(ps...) + return acd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (acd *AuthCodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, acd.sqlExec, acd.mutation, acd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (acd *AuthCodeDelete) ExecX(ctx context.Context) int { + n, err := acd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (acd *AuthCodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(authcode.Table, sqlgraph.NewFieldSpec(authcode.FieldID, field.TypeString)) + if ps := acd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, acd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + acd.mutation.done = true + return affected, err +} + +// AuthCodeDeleteOne is the builder for deleting a single AuthCode entity. +type AuthCodeDeleteOne struct { + acd *AuthCodeDelete +} + +// Where appends a list predicates to the AuthCodeDelete builder. +func (acdo *AuthCodeDeleteOne) Where(ps ...predicate.AuthCode) *AuthCodeDeleteOne { + acdo.acd.mutation.Where(ps...) + return acdo +} + +// Exec executes the deletion query. +func (acdo *AuthCodeDeleteOne) Exec(ctx context.Context) error { + n, err := acdo.acd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{authcode.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (acdo *AuthCodeDeleteOne) ExecX(ctx context.Context) { + if err := acdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_query.go new file mode 100644 index 00000000..75e5805b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authcode" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthCodeQuery is the builder for querying AuthCode entities. +type AuthCodeQuery struct { + config + ctx *QueryContext + order []authcode.OrderOption + inters []Interceptor + predicates []predicate.AuthCode + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AuthCodeQuery builder. +func (acq *AuthCodeQuery) Where(ps ...predicate.AuthCode) *AuthCodeQuery { + acq.predicates = append(acq.predicates, ps...) + return acq +} + +// Limit the number of records to be returned by this query. 
+func (acq *AuthCodeQuery) Limit(limit int) *AuthCodeQuery { + acq.ctx.Limit = &limit + return acq +} + +// Offset to start from. +func (acq *AuthCodeQuery) Offset(offset int) *AuthCodeQuery { + acq.ctx.Offset = &offset + return acq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (acq *AuthCodeQuery) Unique(unique bool) *AuthCodeQuery { + acq.ctx.Unique = &unique + return acq +} + +// Order specifies how the records should be ordered. +func (acq *AuthCodeQuery) Order(o ...authcode.OrderOption) *AuthCodeQuery { + acq.order = append(acq.order, o...) + return acq +} + +// First returns the first AuthCode entity from the query. +// Returns a *NotFoundError when no AuthCode was found. +func (acq *AuthCodeQuery) First(ctx context.Context) (*AuthCode, error) { + nodes, err := acq.Limit(1).All(setContextOp(ctx, acq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{authcode.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (acq *AuthCodeQuery) FirstX(ctx context.Context) *AuthCode { + node, err := acq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AuthCode ID from the query. +// Returns a *NotFoundError when no AuthCode ID was found. +func (acq *AuthCodeQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = acq.Limit(1).IDs(setContextOp(ctx, acq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{authcode.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (acq *AuthCodeQuery) FirstIDX(ctx context.Context) string { + id, err := acq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AuthCode entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AuthCode entity is found. +// Returns a *NotFoundError when no AuthCode entities are found. +func (acq *AuthCodeQuery) Only(ctx context.Context) (*AuthCode, error) { + nodes, err := acq.Limit(2).All(setContextOp(ctx, acq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{authcode.Label} + default: + return nil, &NotSingularError{authcode.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (acq *AuthCodeQuery) OnlyX(ctx context.Context) *AuthCode { + node, err := acq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AuthCode ID in the query. +// Returns a *NotSingularError when more than one AuthCode ID is found. +// Returns a *NotFoundError when no entities are found. +func (acq *AuthCodeQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = acq.Limit(2).IDs(setContextOp(ctx, acq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{authcode.Label} + default: + err = &NotSingularError{authcode.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (acq *AuthCodeQuery) OnlyIDX(ctx context.Context) string { + id, err := acq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AuthCodes. +func (acq *AuthCodeQuery) All(ctx context.Context) ([]*AuthCode, error) { + ctx = setContextOp(ctx, acq.ctx, "All") + if err := acq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AuthCode, *AuthCodeQuery]() + return withInterceptors[[]*AuthCode](ctx, acq, qr, acq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (acq *AuthCodeQuery) AllX(ctx context.Context) []*AuthCode { + nodes, err := acq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AuthCode IDs. +func (acq *AuthCodeQuery) IDs(ctx context.Context) (ids []string, err error) { + if acq.ctx.Unique == nil && acq.path != nil { + acq.Unique(true) + } + ctx = setContextOp(ctx, acq.ctx, "IDs") + if err = acq.Select(authcode.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (acq *AuthCodeQuery) IDsX(ctx context.Context) []string { + ids, err := acq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (acq *AuthCodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, acq.ctx, "Count") + if err := acq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, acq, querierCount[*AuthCodeQuery](), acq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (acq *AuthCodeQuery) CountX(ctx context.Context) int { + count, err := acq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (acq *AuthCodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, acq.ctx, "Exist") + switch _, err := acq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (acq *AuthCodeQuery) ExistX(ctx context.Context) bool { + exist, err := acq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AuthCodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (acq *AuthCodeQuery) Clone() *AuthCodeQuery { + if acq == nil { + return nil + } + return &AuthCodeQuery{ + config: acq.config, + ctx: acq.ctx.Clone(), + order: append([]authcode.OrderOption{}, acq.order...), + inters: append([]Interceptor{}, acq.inters...), + predicates: append([]predicate.AuthCode{}, acq.predicates...), + // clone intermediate query. + sql: acq.sql.Clone(), + path: acq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AuthCode.Query(). +// GroupBy(authcode.FieldClientID). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (acq *AuthCodeQuery) GroupBy(field string, fields ...string) *AuthCodeGroupBy { + acq.ctx.Fields = append([]string{field}, fields...) 
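+	// The group-by columns are only recorded here; prepareQuery validates
+	// each name against authcode.ValidColumn before the query runs.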
+ grbuild := &AuthCodeGroupBy{build: acq} + grbuild.flds = &acq.ctx.Fields + grbuild.label = authcode.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// } +// +// client.AuthCode.Query(). +// Select(authcode.FieldClientID). +// Scan(ctx, &v) +func (acq *AuthCodeQuery) Select(fields ...string) *AuthCodeSelect { + acq.ctx.Fields = append(acq.ctx.Fields, fields...) + sbuild := &AuthCodeSelect{AuthCodeQuery: acq} + sbuild.label = authcode.Label + sbuild.flds, sbuild.scan = &acq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AuthCodeSelect configured with the given aggregations. +func (acq *AuthCodeQuery) Aggregate(fns ...AggregateFunc) *AuthCodeSelect { + return acq.Select().Aggregate(fns...) +} + +func (acq *AuthCodeQuery) prepareQuery(ctx context.Context) error { + for _, inter := range acq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, acq); err != nil { + return err + } + } + } + for _, f := range acq.ctx.Fields { + if !authcode.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if acq.path != nil { + prev, err := acq.path(ctx) + if err != nil { + return err + } + acq.sql = prev + } + return nil +} + +func (acq *AuthCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthCode, error) { + var ( + nodes = []*AuthCode{} + _spec = acq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AuthCode).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AuthCode{config: acq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, acq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (acq *AuthCodeQuery) sqlCount(ctx context.Context) (int, error) { + _spec := acq.querySpec() + _spec.Node.Columns = acq.ctx.Fields + if len(acq.ctx.Fields) > 0 { + _spec.Unique = acq.ctx.Unique != nil && *acq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, acq.driver, _spec) +} + +func (acq *AuthCodeQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(authcode.Table, authcode.Columns, sqlgraph.NewFieldSpec(authcode.FieldID, field.TypeString)) + _spec.From = acq.sql + if unique := acq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if acq.path != nil { + _spec.Unique = true + } + if fields := acq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authcode.FieldID) + for i := range fields { + if fields[i] != authcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := acq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := acq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := acq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := acq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + 
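+			// Each ordering option appends its ORDER BY clause to the
+			// selector when sqlgraph builds the query.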
for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (acq *AuthCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(acq.driver.Dialect()) + t1 := builder.Table(authcode.Table) + columns := acq.ctx.Fields + if len(columns) == 0 { + columns = authcode.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if acq.sql != nil { + selector = acq.sql + selector.Select(selector.Columns(columns...)...) + } + if acq.ctx.Unique != nil && *acq.ctx.Unique { + selector.Distinct() + } + for _, p := range acq.predicates { + p(selector) + } + for _, p := range acq.order { + p(selector) + } + if offset := acq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := acq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AuthCodeGroupBy is the group-by builder for AuthCode entities. +type AuthCodeGroupBy struct { + selector + build *AuthCodeQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (acgb *AuthCodeGroupBy) Aggregate(fns ...AggregateFunc) *AuthCodeGroupBy { + acgb.fns = append(acgb.fns, fns...) + return acgb +} + +// Scan applies the selector query and scans the result into the given value. +func (acgb *AuthCodeGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, acgb.build.ctx, "GroupBy") + if err := acgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthCodeQuery, *AuthCodeGroupBy](ctx, acgb.build, acgb, acgb.build.inters, v) +} + +func (acgb *AuthCodeGroupBy) sqlScan(ctx context.Context, root *AuthCodeQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(acgb.fns)) + for _, fn := range acgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*acgb.flds)+len(acgb.fns)) + for _, f := range *acgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*acgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := acgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AuthCodeSelect is the builder for selecting fields of AuthCode entities. +type AuthCodeSelect struct { + *AuthCodeQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (acs *AuthCodeSelect) Aggregate(fns ...AggregateFunc) *AuthCodeSelect { + acs.fns = append(acs.fns, fns...) + return acs +} + +// Scan applies the selector query and scans the result into the given value. 
+func (acs *AuthCodeSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, acs.ctx, "Select") + if err := acs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthCodeQuery, *AuthCodeSelect](ctx, acs.AuthCodeQuery, acs, acs.inters, v) +} + +func (acs *AuthCodeSelect) sqlScan(ctx context.Context, root *AuthCodeQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(acs.fns)) + for _, fn := range acs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*acs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := acs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authcode_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_update.go new file mode 100644 index 00000000..0d6cd176 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authcode_update.go @@ -0,0 +1,679 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authcode" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthCodeUpdate is the builder for updating AuthCode entities. +type AuthCodeUpdate struct { + config + hooks []Hook + mutation *AuthCodeMutation +} + +// Where appends a list predicates to the AuthCodeUpdate builder. +func (acu *AuthCodeUpdate) Where(ps ...predicate.AuthCode) *AuthCodeUpdate { + acu.mutation.Where(ps...) + return acu +} + +// SetClientID sets the "client_id" field. +func (acu *AuthCodeUpdate) SetClientID(s string) *AuthCodeUpdate { + acu.mutation.SetClientID(s) + return acu +} + +// SetScopes sets the "scopes" field. +func (acu *AuthCodeUpdate) SetScopes(s []string) *AuthCodeUpdate { + acu.mutation.SetScopes(s) + return acu +} + +// AppendScopes appends s to the "scopes" field. +func (acu *AuthCodeUpdate) AppendScopes(s []string) *AuthCodeUpdate { + acu.mutation.AppendScopes(s) + return acu +} + +// ClearScopes clears the value of the "scopes" field. +func (acu *AuthCodeUpdate) ClearScopes() *AuthCodeUpdate { + acu.mutation.ClearScopes() + return acu +} + +// SetNonce sets the "nonce" field. +func (acu *AuthCodeUpdate) SetNonce(s string) *AuthCodeUpdate { + acu.mutation.SetNonce(s) + return acu +} + +// SetRedirectURI sets the "redirect_uri" field. +func (acu *AuthCodeUpdate) SetRedirectURI(s string) *AuthCodeUpdate { + acu.mutation.SetRedirectURI(s) + return acu +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (acu *AuthCodeUpdate) SetClaimsUserID(s string) *AuthCodeUpdate { + acu.mutation.SetClaimsUserID(s) + return acu +} + +// SetClaimsUsername sets the "claims_username" field. +func (acu *AuthCodeUpdate) SetClaimsUsername(s string) *AuthCodeUpdate { + acu.mutation.SetClaimsUsername(s) + return acu +} + +// SetClaimsEmail sets the "claims_email" field. +func (acu *AuthCodeUpdate) SetClaimsEmail(s string) *AuthCodeUpdate { + acu.mutation.SetClaimsEmail(s) + return acu +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. 
+func (acu *AuthCodeUpdate) SetClaimsEmailVerified(b bool) *AuthCodeUpdate { + acu.mutation.SetClaimsEmailVerified(b) + return acu +} + +// SetClaimsGroups sets the "claims_groups" field. +func (acu *AuthCodeUpdate) SetClaimsGroups(s []string) *AuthCodeUpdate { + acu.mutation.SetClaimsGroups(s) + return acu +} + +// AppendClaimsGroups appends s to the "claims_groups" field. +func (acu *AuthCodeUpdate) AppendClaimsGroups(s []string) *AuthCodeUpdate { + acu.mutation.AppendClaimsGroups(s) + return acu +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (acu *AuthCodeUpdate) ClearClaimsGroups() *AuthCodeUpdate { + acu.mutation.ClearClaimsGroups() + return acu +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (acu *AuthCodeUpdate) SetClaimsPreferredUsername(s string) *AuthCodeUpdate { + acu.mutation.SetClaimsPreferredUsername(s) + return acu +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (acu *AuthCodeUpdate) SetNillableClaimsPreferredUsername(s *string) *AuthCodeUpdate { + if s != nil { + acu.SetClaimsPreferredUsername(*s) + } + return acu +} + +// SetConnectorID sets the "connector_id" field. +func (acu *AuthCodeUpdate) SetConnectorID(s string) *AuthCodeUpdate { + acu.mutation.SetConnectorID(s) + return acu +} + +// SetConnectorData sets the "connector_data" field. +func (acu *AuthCodeUpdate) SetConnectorData(b []byte) *AuthCodeUpdate { + acu.mutation.SetConnectorData(b) + return acu +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (acu *AuthCodeUpdate) ClearConnectorData() *AuthCodeUpdate { + acu.mutation.ClearConnectorData() + return acu +} + +// SetExpiry sets the "expiry" field. +func (acu *AuthCodeUpdate) SetExpiry(t time.Time) *AuthCodeUpdate { + acu.mutation.SetExpiry(t) + return acu +} + +// SetCodeChallenge sets the "code_challenge" field. +func (acu *AuthCodeUpdate) SetCodeChallenge(s string) *AuthCodeUpdate { + acu.mutation.SetCodeChallenge(s) + return acu +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (acu *AuthCodeUpdate) SetNillableCodeChallenge(s *string) *AuthCodeUpdate { + if s != nil { + acu.SetCodeChallenge(*s) + } + return acu +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (acu *AuthCodeUpdate) SetCodeChallengeMethod(s string) *AuthCodeUpdate { + acu.mutation.SetCodeChallengeMethod(s) + return acu +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (acu *AuthCodeUpdate) SetNillableCodeChallengeMethod(s *string) *AuthCodeUpdate { + if s != nil { + acu.SetCodeChallengeMethod(*s) + } + return acu +} + +// Mutation returns the AuthCodeMutation object of the builder. +func (acu *AuthCodeUpdate) Mutation() *AuthCodeMutation { + return acu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (acu *AuthCodeUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, acu.sqlSave, acu.mutation, acu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (acu *AuthCodeUpdate) SaveX(ctx context.Context) int { + affected, err := acu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (acu *AuthCodeUpdate) Exec(ctx context.Context) error { + _, err := acu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (acu *AuthCodeUpdate) ExecX(ctx context.Context) { + if err := acu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (acu *AuthCodeUpdate) check() error { + if v, ok := acu.mutation.ClientID(); ok { + if err := authcode.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.client_id": %w`, err)} + } + } + if v, ok := acu.mutation.Nonce(); ok { + if err := authcode.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "AuthCode.nonce": %w`, err)} + } + } + if v, ok := acu.mutation.RedirectURI(); ok { + if err := authcode.RedirectURIValidator(v); err != nil { + return &ValidationError{Name: "redirect_uri", err: fmt.Errorf(`db: validator failed for field "AuthCode.redirect_uri": %w`, err)} + } + } + if v, ok := acu.mutation.ClaimsUserID(); ok { + if err := authcode.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_user_id": %w`, err)} + } + } + if v, ok := acu.mutation.ClaimsUsername(); ok { + if err := authcode.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_username": %w`, err)} + } + } + if v, ok := acu.mutation.ClaimsEmail(); ok { + if err := authcode.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_email": %w`, err)} + } + } + if v, ok := acu.mutation.ConnectorID(); ok { + if err := authcode.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.connector_id": %w`, err)} + } + } + return nil +} + +func (acu *AuthCodeUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := acu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(authcode.Table, authcode.Columns, sqlgraph.NewFieldSpec(authcode.FieldID, field.TypeString)) + if ps := acu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := acu.mutation.ClientID(); ok { + _spec.SetField(authcode.FieldClientID, field.TypeString, value) + } + if value, ok := acu.mutation.Scopes(); ok { + _spec.SetField(authcode.FieldScopes, field.TypeJSON, value) + } + if value, ok := acu.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authcode.FieldScopes, value) + }) + } + if acu.mutation.ScopesCleared() { + _spec.ClearField(authcode.FieldScopes, field.TypeJSON) + } + if value, ok := acu.mutation.Nonce(); ok { + _spec.SetField(authcode.FieldNonce, field.TypeString, value) + } + if value, ok := acu.mutation.RedirectURI(); ok { + _spec.SetField(authcode.FieldRedirectURI, field.TypeString, value) + } + if value, ok := acu.mutation.ClaimsUserID(); ok { + _spec.SetField(authcode.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := acu.mutation.ClaimsUsername(); ok { + _spec.SetField(authcode.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := 
acu.mutation.ClaimsEmail(); ok { + _spec.SetField(authcode.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := acu.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authcode.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := acu.mutation.ClaimsGroups(); ok { + _spec.SetField(authcode.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := acu.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authcode.FieldClaimsGroups, value) + }) + } + if acu.mutation.ClaimsGroupsCleared() { + _spec.ClearField(authcode.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := acu.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authcode.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := acu.mutation.ConnectorID(); ok { + _spec.SetField(authcode.FieldConnectorID, field.TypeString, value) + } + if value, ok := acu.mutation.ConnectorData(); ok { + _spec.SetField(authcode.FieldConnectorData, field.TypeBytes, value) + } + if acu.mutation.ConnectorDataCleared() { + _spec.ClearField(authcode.FieldConnectorData, field.TypeBytes) + } + if value, ok := acu.mutation.Expiry(); ok { + _spec.SetField(authcode.FieldExpiry, field.TypeTime, value) + } + if value, ok := acu.mutation.CodeChallenge(); ok { + _spec.SetField(authcode.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := acu.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authcode.FieldCodeChallengeMethod, field.TypeString, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, acu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{authcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + acu.mutation.done = true + return n, nil +} + +// AuthCodeUpdateOne is the builder for updating a single AuthCode entity. +type AuthCodeUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AuthCodeMutation +} + +// SetClientID sets the "client_id" field. +func (acuo *AuthCodeUpdateOne) SetClientID(s string) *AuthCodeUpdateOne { + acuo.mutation.SetClientID(s) + return acuo +} + +// SetScopes sets the "scopes" field. +func (acuo *AuthCodeUpdateOne) SetScopes(s []string) *AuthCodeUpdateOne { + acuo.mutation.SetScopes(s) + return acuo +} + +// AppendScopes appends s to the "scopes" field. +func (acuo *AuthCodeUpdateOne) AppendScopes(s []string) *AuthCodeUpdateOne { + acuo.mutation.AppendScopes(s) + return acuo +} + +// ClearScopes clears the value of the "scopes" field. +func (acuo *AuthCodeUpdateOne) ClearScopes() *AuthCodeUpdateOne { + acuo.mutation.ClearScopes() + return acuo +} + +// SetNonce sets the "nonce" field. +func (acuo *AuthCodeUpdateOne) SetNonce(s string) *AuthCodeUpdateOne { + acuo.mutation.SetNonce(s) + return acuo +} + +// SetRedirectURI sets the "redirect_uri" field. +func (acuo *AuthCodeUpdateOne) SetRedirectURI(s string) *AuthCodeUpdateOne { + acuo.mutation.SetRedirectURI(s) + return acuo +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (acuo *AuthCodeUpdateOne) SetClaimsUserID(s string) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsUserID(s) + return acuo +} + +// SetClaimsUsername sets the "claims_username" field. +func (acuo *AuthCodeUpdateOne) SetClaimsUsername(s string) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsUsername(s) + return acuo +} + +// SetClaimsEmail sets the "claims_email" field. 
+func (acuo *AuthCodeUpdateOne) SetClaimsEmail(s string) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsEmail(s) + return acuo +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (acuo *AuthCodeUpdateOne) SetClaimsEmailVerified(b bool) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsEmailVerified(b) + return acuo +} + +// SetClaimsGroups sets the "claims_groups" field. +func (acuo *AuthCodeUpdateOne) SetClaimsGroups(s []string) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsGroups(s) + return acuo +} + +// AppendClaimsGroups appends s to the "claims_groups" field. +func (acuo *AuthCodeUpdateOne) AppendClaimsGroups(s []string) *AuthCodeUpdateOne { + acuo.mutation.AppendClaimsGroups(s) + return acuo +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (acuo *AuthCodeUpdateOne) ClearClaimsGroups() *AuthCodeUpdateOne { + acuo.mutation.ClearClaimsGroups() + return acuo +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (acuo *AuthCodeUpdateOne) SetClaimsPreferredUsername(s string) *AuthCodeUpdateOne { + acuo.mutation.SetClaimsPreferredUsername(s) + return acuo +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (acuo *AuthCodeUpdateOne) SetNillableClaimsPreferredUsername(s *string) *AuthCodeUpdateOne { + if s != nil { + acuo.SetClaimsPreferredUsername(*s) + } + return acuo +} + +// SetConnectorID sets the "connector_id" field. +func (acuo *AuthCodeUpdateOne) SetConnectorID(s string) *AuthCodeUpdateOne { + acuo.mutation.SetConnectorID(s) + return acuo +} + +// SetConnectorData sets the "connector_data" field. +func (acuo *AuthCodeUpdateOne) SetConnectorData(b []byte) *AuthCodeUpdateOne { + acuo.mutation.SetConnectorData(b) + return acuo +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (acuo *AuthCodeUpdateOne) ClearConnectorData() *AuthCodeUpdateOne { + acuo.mutation.ClearConnectorData() + return acuo +} + +// SetExpiry sets the "expiry" field. +func (acuo *AuthCodeUpdateOne) SetExpiry(t time.Time) *AuthCodeUpdateOne { + acuo.mutation.SetExpiry(t) + return acuo +} + +// SetCodeChallenge sets the "code_challenge" field. +func (acuo *AuthCodeUpdateOne) SetCodeChallenge(s string) *AuthCodeUpdateOne { + acuo.mutation.SetCodeChallenge(s) + return acuo +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (acuo *AuthCodeUpdateOne) SetNillableCodeChallenge(s *string) *AuthCodeUpdateOne { + if s != nil { + acuo.SetCodeChallenge(*s) + } + return acuo +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (acuo *AuthCodeUpdateOne) SetCodeChallengeMethod(s string) *AuthCodeUpdateOne { + acuo.mutation.SetCodeChallengeMethod(s) + return acuo +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (acuo *AuthCodeUpdateOne) SetNillableCodeChallengeMethod(s *string) *AuthCodeUpdateOne { + if s != nil { + acuo.SetCodeChallengeMethod(*s) + } + return acuo +} + +// Mutation returns the AuthCodeMutation object of the builder. +func (acuo *AuthCodeUpdateOne) Mutation() *AuthCodeMutation { + return acuo.mutation +} + +// Where appends a list predicates to the AuthCodeUpdate builder. +func (acuo *AuthCodeUpdateOne) Where(ps ...predicate.AuthCode) *AuthCodeUpdateOne { + acuo.mutation.Where(ps...) + return acuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. 
+// The default is selecting all fields defined in the entity schema. +func (acuo *AuthCodeUpdateOne) Select(field string, fields ...string) *AuthCodeUpdateOne { + acuo.fields = append([]string{field}, fields...) + return acuo +} + +// Save executes the query and returns the updated AuthCode entity. +func (acuo *AuthCodeUpdateOne) Save(ctx context.Context) (*AuthCode, error) { + return withHooks(ctx, acuo.sqlSave, acuo.mutation, acuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (acuo *AuthCodeUpdateOne) SaveX(ctx context.Context) *AuthCode { + node, err := acuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (acuo *AuthCodeUpdateOne) Exec(ctx context.Context) error { + _, err := acuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (acuo *AuthCodeUpdateOne) ExecX(ctx context.Context) { + if err := acuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (acuo *AuthCodeUpdateOne) check() error { + if v, ok := acuo.mutation.ClientID(); ok { + if err := authcode.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.client_id": %w`, err)} + } + } + if v, ok := acuo.mutation.Nonce(); ok { + if err := authcode.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "AuthCode.nonce": %w`, err)} + } + } + if v, ok := acuo.mutation.RedirectURI(); ok { + if err := authcode.RedirectURIValidator(v); err != nil { + return &ValidationError{Name: "redirect_uri", err: fmt.Errorf(`db: validator failed for field "AuthCode.redirect_uri": %w`, err)} + } + } + if v, ok := acuo.mutation.ClaimsUserID(); ok { + if err := authcode.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_user_id": %w`, err)} + } + } + if v, ok := acuo.mutation.ClaimsUsername(); ok { + if err := authcode.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_username": %w`, err)} + } + } + if v, ok := acuo.mutation.ClaimsEmail(); ok { + if err := authcode.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "AuthCode.claims_email": %w`, err)} + } + } + if v, ok := acuo.mutation.ConnectorID(); ok { + if err := authcode.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "AuthCode.connector_id": %w`, err)} + } + } + return nil +} + +func (acuo *AuthCodeUpdateOne) sqlSave(ctx context.Context) (_node *AuthCode, err error) { + if err := acuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(authcode.Table, authcode.Columns, sqlgraph.NewFieldSpec(authcode.FieldID, field.TypeString)) + id, ok := acuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "AuthCode.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := acuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authcode.FieldID) + for _, f := range fields { + if !authcode.ValidColumn(f) { + return nil, 
&ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != authcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := acuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := acuo.mutation.ClientID(); ok { + _spec.SetField(authcode.FieldClientID, field.TypeString, value) + } + if value, ok := acuo.mutation.Scopes(); ok { + _spec.SetField(authcode.FieldScopes, field.TypeJSON, value) + } + if value, ok := acuo.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authcode.FieldScopes, value) + }) + } + if acuo.mutation.ScopesCleared() { + _spec.ClearField(authcode.FieldScopes, field.TypeJSON) + } + if value, ok := acuo.mutation.Nonce(); ok { + _spec.SetField(authcode.FieldNonce, field.TypeString, value) + } + if value, ok := acuo.mutation.RedirectURI(); ok { + _spec.SetField(authcode.FieldRedirectURI, field.TypeString, value) + } + if value, ok := acuo.mutation.ClaimsUserID(); ok { + _spec.SetField(authcode.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := acuo.mutation.ClaimsUsername(); ok { + _spec.SetField(authcode.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := acuo.mutation.ClaimsEmail(); ok { + _spec.SetField(authcode.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := acuo.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authcode.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := acuo.mutation.ClaimsGroups(); ok { + _spec.SetField(authcode.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := acuo.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authcode.FieldClaimsGroups, value) + }) + } + if acuo.mutation.ClaimsGroupsCleared() { + _spec.ClearField(authcode.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := acuo.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authcode.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := acuo.mutation.ConnectorID(); ok { + _spec.SetField(authcode.FieldConnectorID, field.TypeString, value) + } + if value, ok := acuo.mutation.ConnectorData(); ok { + _spec.SetField(authcode.FieldConnectorData, field.TypeBytes, value) + } + if acuo.mutation.ConnectorDataCleared() { + _spec.ClearField(authcode.FieldConnectorData, field.TypeBytes) + } + if value, ok := acuo.mutation.Expiry(); ok { + _spec.SetField(authcode.FieldExpiry, field.TypeTime, value) + } + if value, ok := acuo.mutation.CodeChallenge(); ok { + _spec.SetField(authcode.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := acuo.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authcode.FieldCodeChallengeMethod, field.TypeString, value) + } + _node = &AuthCode{config: acuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, acuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{authcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + acuo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest.go new file mode 100644 index 00000000..b95592e5 --- /dev/null +++ 
b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest.go @@ -0,0 +1,326 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/authrequest" +) + +// AuthRequest is the model entity for the AuthRequest schema. +type AuthRequest struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // ClientID holds the value of the "client_id" field. + ClientID string `json:"client_id,omitempty"` + // Scopes holds the value of the "scopes" field. + Scopes []string `json:"scopes,omitempty"` + // ResponseTypes holds the value of the "response_types" field. + ResponseTypes []string `json:"response_types,omitempty"` + // RedirectURI holds the value of the "redirect_uri" field. + RedirectURI string `json:"redirect_uri,omitempty"` + // Nonce holds the value of the "nonce" field. + Nonce string `json:"nonce,omitempty"` + // State holds the value of the "state" field. + State string `json:"state,omitempty"` + // ForceApprovalPrompt holds the value of the "force_approval_prompt" field. + ForceApprovalPrompt bool `json:"force_approval_prompt,omitempty"` + // LoggedIn holds the value of the "logged_in" field. + LoggedIn bool `json:"logged_in,omitempty"` + // ClaimsUserID holds the value of the "claims_user_id" field. + ClaimsUserID string `json:"claims_user_id,omitempty"` + // ClaimsUsername holds the value of the "claims_username" field. + ClaimsUsername string `json:"claims_username,omitempty"` + // ClaimsEmail holds the value of the "claims_email" field. + ClaimsEmail string `json:"claims_email,omitempty"` + // ClaimsEmailVerified holds the value of the "claims_email_verified" field. + ClaimsEmailVerified bool `json:"claims_email_verified,omitempty"` + // ClaimsGroups holds the value of the "claims_groups" field. + ClaimsGroups []string `json:"claims_groups,omitempty"` + // ClaimsPreferredUsername holds the value of the "claims_preferred_username" field. + ClaimsPreferredUsername string `json:"claims_preferred_username,omitempty"` + // ConnectorID holds the value of the "connector_id" field. + ConnectorID string `json:"connector_id,omitempty"` + // ConnectorData holds the value of the "connector_data" field. + ConnectorData *[]byte `json:"connector_data,omitempty"` + // Expiry holds the value of the "expiry" field. + Expiry time.Time `json:"expiry,omitempty"` + // CodeChallenge holds the value of the "code_challenge" field. + CodeChallenge string `json:"code_challenge,omitempty"` + // CodeChallengeMethod holds the value of the "code_challenge_method" field. + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` + // HmacKey holds the value of the "hmac_key" field. + HmacKey []byte `json:"hmac_key,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*AuthRequest) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case authrequest.FieldScopes, authrequest.FieldResponseTypes, authrequest.FieldClaimsGroups, authrequest.FieldConnectorData, authrequest.FieldHmacKey: + values[i] = new([]byte) + case authrequest.FieldForceApprovalPrompt, authrequest.FieldLoggedIn, authrequest.FieldClaimsEmailVerified: + values[i] = new(sql.NullBool) + case authrequest.FieldID, authrequest.FieldClientID, authrequest.FieldRedirectURI, authrequest.FieldNonce, authrequest.FieldState, authrequest.FieldClaimsUserID, authrequest.FieldClaimsUsername, authrequest.FieldClaimsEmail, authrequest.FieldClaimsPreferredUsername, authrequest.FieldConnectorID, authrequest.FieldCodeChallenge, authrequest.FieldCodeChallengeMethod: + values[i] = new(sql.NullString) + case authrequest.FieldExpiry: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AuthRequest fields. +func (ar *AuthRequest) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case authrequest.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + ar.ID = value.String + } + case authrequest.FieldClientID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field client_id", values[i]) + } else if value.Valid { + ar.ClientID = value.String + } + case authrequest.FieldScopes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field scopes", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &ar.Scopes); err != nil { + return fmt.Errorf("unmarshal field scopes: %w", err) + } + } + case authrequest.FieldResponseTypes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field response_types", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &ar.ResponseTypes); err != nil { + return fmt.Errorf("unmarshal field response_types: %w", err) + } + } + case authrequest.FieldRedirectURI: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field redirect_uri", values[i]) + } else if value.Valid { + ar.RedirectURI = value.String + } + case authrequest.FieldNonce: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field nonce", values[i]) + } else if value.Valid { + ar.Nonce = value.String + } + case authrequest.FieldState: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field state", values[i]) + } else if value.Valid { + ar.State = value.String + } + case authrequest.FieldForceApprovalPrompt: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field force_approval_prompt", values[i]) + } else if value.Valid { + ar.ForceApprovalPrompt = value.Bool + } + case authrequest.FieldLoggedIn: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field logged_in", values[i]) + } else if value.Valid { + 
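+				// Copy the scanned value only when the column was non-NULL.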
ar.LoggedIn = value.Bool + } + case authrequest.FieldClaimsUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_user_id", values[i]) + } else if value.Valid { + ar.ClaimsUserID = value.String + } + case authrequest.FieldClaimsUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_username", values[i]) + } else if value.Valid { + ar.ClaimsUsername = value.String + } + case authrequest.FieldClaimsEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_email", values[i]) + } else if value.Valid { + ar.ClaimsEmail = value.String + } + case authrequest.FieldClaimsEmailVerified: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field claims_email_verified", values[i]) + } else if value.Valid { + ar.ClaimsEmailVerified = value.Bool + } + case authrequest.FieldClaimsGroups: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field claims_groups", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &ar.ClaimsGroups); err != nil { + return fmt.Errorf("unmarshal field claims_groups: %w", err) + } + } + case authrequest.FieldClaimsPreferredUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_preferred_username", values[i]) + } else if value.Valid { + ar.ClaimsPreferredUsername = value.String + } + case authrequest.FieldConnectorID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field connector_id", values[i]) + } else if value.Valid { + ar.ConnectorID = value.String + } + case authrequest.FieldConnectorData: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field connector_data", values[i]) + } else if value != nil { + ar.ConnectorData = value + } + case authrequest.FieldExpiry: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expiry", values[i]) + } else if value.Valid { + ar.Expiry = value.Time + } + case authrequest.FieldCodeChallenge: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge", values[i]) + } else if value.Valid { + ar.CodeChallenge = value.String + } + case authrequest.FieldCodeChallengeMethod: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge_method", values[i]) + } else if value.Valid { + ar.CodeChallengeMethod = value.String + } + case authrequest.FieldHmacKey: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field hmac_key", values[i]) + } else if value != nil { + ar.HmacKey = *value + } + default: + ar.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AuthRequest. +// This includes values selected through modifiers, order, etc. +func (ar *AuthRequest) Value(name string) (ent.Value, error) { + return ar.selectValues.Get(name) +} + +// Update returns a builder for updating this AuthRequest. +// Note that you need to call AuthRequest.Unwrap() before calling this method if this AuthRequest +// was returned from a transaction, and the transaction was committed or rolled back. 
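+//
+// A minimal usage sketch (hypothetical client code; Get, SetLoggedIn and
+// Save are the usual generated helpers):
+//
+//	ar, err := client.AuthRequest.Get(ctx, id)
+//	if err != nil {
+//		// handle the error
+//	}
+//	ar, err = ar.Update().
+//		SetLoggedIn(true).
+//		Save(ctx)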
+func (ar *AuthRequest) Update() *AuthRequestUpdateOne { + return NewAuthRequestClient(ar.config).UpdateOne(ar) +} + +// Unwrap unwraps the AuthRequest entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (ar *AuthRequest) Unwrap() *AuthRequest { + _tx, ok := ar.config.driver.(*txDriver) + if !ok { + panic("db: AuthRequest is not a transactional entity") + } + ar.config.driver = _tx.drv + return ar +} + +// String implements the fmt.Stringer. +func (ar *AuthRequest) String() string { + var builder strings.Builder + builder.WriteString("AuthRequest(") + builder.WriteString(fmt.Sprintf("id=%v, ", ar.ID)) + builder.WriteString("client_id=") + builder.WriteString(ar.ClientID) + builder.WriteString(", ") + builder.WriteString("scopes=") + builder.WriteString(fmt.Sprintf("%v", ar.Scopes)) + builder.WriteString(", ") + builder.WriteString("response_types=") + builder.WriteString(fmt.Sprintf("%v", ar.ResponseTypes)) + builder.WriteString(", ") + builder.WriteString("redirect_uri=") + builder.WriteString(ar.RedirectURI) + builder.WriteString(", ") + builder.WriteString("nonce=") + builder.WriteString(ar.Nonce) + builder.WriteString(", ") + builder.WriteString("state=") + builder.WriteString(ar.State) + builder.WriteString(", ") + builder.WriteString("force_approval_prompt=") + builder.WriteString(fmt.Sprintf("%v", ar.ForceApprovalPrompt)) + builder.WriteString(", ") + builder.WriteString("logged_in=") + builder.WriteString(fmt.Sprintf("%v", ar.LoggedIn)) + builder.WriteString(", ") + builder.WriteString("claims_user_id=") + builder.WriteString(ar.ClaimsUserID) + builder.WriteString(", ") + builder.WriteString("claims_username=") + builder.WriteString(ar.ClaimsUsername) + builder.WriteString(", ") + builder.WriteString("claims_email=") + builder.WriteString(ar.ClaimsEmail) + builder.WriteString(", ") + builder.WriteString("claims_email_verified=") + builder.WriteString(fmt.Sprintf("%v", ar.ClaimsEmailVerified)) + builder.WriteString(", ") + builder.WriteString("claims_groups=") + builder.WriteString(fmt.Sprintf("%v", ar.ClaimsGroups)) + builder.WriteString(", ") + builder.WriteString("claims_preferred_username=") + builder.WriteString(ar.ClaimsPreferredUsername) + builder.WriteString(", ") + builder.WriteString("connector_id=") + builder.WriteString(ar.ConnectorID) + builder.WriteString(", ") + if v := ar.ConnectorData; v != nil { + builder.WriteString("connector_data=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("expiry=") + builder.WriteString(ar.Expiry.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("code_challenge=") + builder.WriteString(ar.CodeChallenge) + builder.WriteString(", ") + builder.WriteString("code_challenge_method=") + builder.WriteString(ar.CodeChallengeMethod) + builder.WriteString(", ") + builder.WriteString("hmac_key=") + builder.WriteString(fmt.Sprintf("%v", ar.HmacKey)) + builder.WriteByte(')') + return builder.String() +} + +// AuthRequests is a parsable slice of AuthRequest. 
+type AuthRequests []*AuthRequest diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/BUILD new file mode 100644 index 00000000..f64a2ac2 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "authrequest", + srcs = [ + "authrequest.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/authrequest", + importpath = "github.com/dexidp/dex/storage/ent/db/authrequest", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/authrequest.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/authrequest.go new file mode 100644 index 00000000..0998c799 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/authrequest.go @@ -0,0 +1,185 @@ +// Code generated by ent, DO NOT EDIT. + +package authrequest + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the authrequest type in the database. + Label = "auth_request" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldClientID holds the string denoting the client_id field in the database. + FieldClientID = "client_id" + // FieldScopes holds the string denoting the scopes field in the database. + FieldScopes = "scopes" + // FieldResponseTypes holds the string denoting the response_types field in the database. + FieldResponseTypes = "response_types" + // FieldRedirectURI holds the string denoting the redirect_uri field in the database. + FieldRedirectURI = "redirect_uri" + // FieldNonce holds the string denoting the nonce field in the database. + FieldNonce = "nonce" + // FieldState holds the string denoting the state field in the database. + FieldState = "state" + // FieldForceApprovalPrompt holds the string denoting the force_approval_prompt field in the database. + FieldForceApprovalPrompt = "force_approval_prompt" + // FieldLoggedIn holds the string denoting the logged_in field in the database. + FieldLoggedIn = "logged_in" + // FieldClaimsUserID holds the string denoting the claims_user_id field in the database. + FieldClaimsUserID = "claims_user_id" + // FieldClaimsUsername holds the string denoting the claims_username field in the database. + FieldClaimsUsername = "claims_username" + // FieldClaimsEmail holds the string denoting the claims_email field in the database. + FieldClaimsEmail = "claims_email" + // FieldClaimsEmailVerified holds the string denoting the claims_email_verified field in the database. + FieldClaimsEmailVerified = "claims_email_verified" + // FieldClaimsGroups holds the string denoting the claims_groups field in the database. + FieldClaimsGroups = "claims_groups" + // FieldClaimsPreferredUsername holds the string denoting the claims_preferred_username field in the database. + FieldClaimsPreferredUsername = "claims_preferred_username" + // FieldConnectorID holds the string denoting the connector_id field in the database. + FieldConnectorID = "connector_id" + // FieldConnectorData holds the string denoting the connector_data field in the database. + FieldConnectorData = "connector_data" + // FieldExpiry holds the string denoting the expiry field in the database. 
+ FieldExpiry = "expiry" + // FieldCodeChallenge holds the string denoting the code_challenge field in the database. + FieldCodeChallenge = "code_challenge" + // FieldCodeChallengeMethod holds the string denoting the code_challenge_method field in the database. + FieldCodeChallengeMethod = "code_challenge_method" + // FieldHmacKey holds the string denoting the hmac_key field in the database. + FieldHmacKey = "hmac_key" + // Table holds the table name of the authrequest in the database. + Table = "auth_requests" +) + +// Columns holds all SQL columns for authrequest fields. +var Columns = []string{ + FieldID, + FieldClientID, + FieldScopes, + FieldResponseTypes, + FieldRedirectURI, + FieldNonce, + FieldState, + FieldForceApprovalPrompt, + FieldLoggedIn, + FieldClaimsUserID, + FieldClaimsUsername, + FieldClaimsEmail, + FieldClaimsEmailVerified, + FieldClaimsGroups, + FieldClaimsPreferredUsername, + FieldConnectorID, + FieldConnectorData, + FieldExpiry, + FieldCodeChallenge, + FieldCodeChallengeMethod, + FieldHmacKey, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultClaimsPreferredUsername holds the default value on creation for the "claims_preferred_username" field. + DefaultClaimsPreferredUsername string + // DefaultCodeChallenge holds the default value on creation for the "code_challenge" field. + DefaultCodeChallenge string + // DefaultCodeChallengeMethod holds the default value on creation for the "code_challenge_method" field. + DefaultCodeChallengeMethod string + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the AuthRequest queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByClientID orders the results by the client_id field. +func ByClientID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientID, opts...).ToFunc() +} + +// ByRedirectURI orders the results by the redirect_uri field. +func ByRedirectURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRedirectURI, opts...).ToFunc() +} + +// ByNonce orders the results by the nonce field. +func ByNonce(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNonce, opts...).ToFunc() +} + +// ByState orders the results by the state field. +func ByState(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldState, opts...).ToFunc() +} + +// ByForceApprovalPrompt orders the results by the force_approval_prompt field. +func ByForceApprovalPrompt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldForceApprovalPrompt, opts...).ToFunc() +} + +// ByLoggedIn orders the results by the logged_in field. +func ByLoggedIn(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLoggedIn, opts...).ToFunc() +} + +// ByClaimsUserID orders the results by the claims_user_id field. +func ByClaimsUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUserID, opts...).ToFunc() +} + +// ByClaimsUsername orders the results by the claims_username field. 
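+//
+// For example (a sketch of hypothetical client code):
+//
+//	reqs, err := client.AuthRequest.Query().
+//		Order(authrequest.ByClaimsUsername(sql.OrderDesc())).
+//		All(ctx)
+//
+// Passing several OrderOption values to Order sorts by multiple fields.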
+func ByClaimsUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUsername, opts...).ToFunc() +} + +// ByClaimsEmail orders the results by the claims_email field. +func ByClaimsEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmail, opts...).ToFunc() +} + +// ByClaimsEmailVerified orders the results by the claims_email_verified field. +func ByClaimsEmailVerified(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmailVerified, opts...).ToFunc() +} + +// ByClaimsPreferredUsername orders the results by the claims_preferred_username field. +func ByClaimsPreferredUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsPreferredUsername, opts...).ToFunc() +} + +// ByConnectorID orders the results by the connector_id field. +func ByConnectorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConnectorID, opts...).ToFunc() +} + +// ByExpiry orders the results by the expiry field. +func ByExpiry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiry, opts...).ToFunc() +} + +// ByCodeChallenge orders the results by the code_challenge field. +func ByCodeChallenge(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallenge, opts...).ToFunc() +} + +// ByCodeChallengeMethod orders the results by the code_challenge_method field. +func ByCodeChallengeMethod(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallengeMethod, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/where.go new file mode 100644 index 00000000..10bd7447 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest/where.go @@ -0,0 +1,1087 @@ +// Code generated by ent, DO NOT EDIT. + +package authrequest + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldID, id)) +} + +// ClientID applies equality check predicate on the "client_id" field. It's identical to ClientIDEQ. +func ClientID(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClientID, v)) +} + +// RedirectURI applies equality check predicate on the "redirect_uri" field. It's identical to RedirectURIEQ. +func RedirectURI(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldRedirectURI, v)) +} + +// Nonce applies equality check predicate on the "nonce" field. It's identical to NonceEQ. +func Nonce(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldNonce, v)) +} + +// State applies equality check predicate on the "state" field. It's identical to StateEQ. +func State(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldState, v)) +} + +// ForceApprovalPrompt applies equality check predicate on the "force_approval_prompt" field. It's identical to ForceApprovalPromptEQ. +func ForceApprovalPrompt(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldForceApprovalPrompt, v)) +} + +// LoggedIn applies equality check predicate on the "logged_in" field. It's identical to LoggedInEQ. +func LoggedIn(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldLoggedIn, v)) +} + +// ClaimsUserID applies equality check predicate on the "claims_user_id" field. It's identical to ClaimsUserIDEQ. +func ClaimsUserID(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUsername applies equality check predicate on the "claims_username" field. It's identical to ClaimsUsernameEQ. +func ClaimsUsername(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsEmail applies equality check predicate on the "claims_email" field. It's identical to ClaimsEmailEQ. +func ClaimsEmail(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerified applies equality check predicate on the "claims_email_verified" field. It's identical to ClaimsEmailVerifiedEQ. +func ClaimsEmailVerified(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsPreferredUsername applies equality check predicate on the "claims_preferred_username" field. It's identical to ClaimsPreferredUsernameEQ. +func ClaimsPreferredUsername(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ConnectorID applies equality check predicate on the "connector_id" field. It's identical to ConnectorIDEQ. +func ConnectorID(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorData applies equality check predicate on the "connector_data" field. It's identical to ConnectorDataEQ. 
+func ConnectorData(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldConnectorData, v)) +} + +// Expiry applies equality check predicate on the "expiry" field. It's identical to ExpiryEQ. +func Expiry(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldExpiry, v)) +} + +// CodeChallenge applies equality check predicate on the "code_challenge" field. It's identical to CodeChallengeEQ. +func CodeChallenge(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeMethod applies equality check predicate on the "code_challenge_method" field. It's identical to CodeChallengeMethodEQ. +func CodeChallengeMethod(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// HmacKey applies equality check predicate on the "hmac_key" field. It's identical to HmacKeyEQ. +func HmacKey(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldHmacKey, v)) +} + +// ClientIDEQ applies the EQ predicate on the "client_id" field. +func ClientIDEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClientID, v)) +} + +// ClientIDNEQ applies the NEQ predicate on the "client_id" field. +func ClientIDNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClientID, v)) +} + +// ClientIDIn applies the In predicate on the "client_id" field. +func ClientIDIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldClientID, vs...)) +} + +// ClientIDNotIn applies the NotIn predicate on the "client_id" field. +func ClientIDNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldClientID, vs...)) +} + +// ClientIDGT applies the GT predicate on the "client_id" field. +func ClientIDGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldClientID, v)) +} + +// ClientIDGTE applies the GTE predicate on the "client_id" field. +func ClientIDGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldClientID, v)) +} + +// ClientIDLT applies the LT predicate on the "client_id" field. +func ClientIDLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldClientID, v)) +} + +// ClientIDLTE applies the LTE predicate on the "client_id" field. +func ClientIDLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldClientID, v)) +} + +// ClientIDContains applies the Contains predicate on the "client_id" field. +func ClientIDContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldClientID, v)) +} + +// ClientIDHasPrefix applies the HasPrefix predicate on the "client_id" field. +func ClientIDHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldClientID, v)) +} + +// ClientIDHasSuffix applies the HasSuffix predicate on the "client_id" field. +func ClientIDHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldClientID, v)) +} + +// ClientIDEqualFold applies the EqualFold predicate on the "client_id" field. +func ClientIDEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldClientID, v)) +} + +// ClientIDContainsFold applies the ContainsFold predicate on the "client_id" field. 
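+//
+// As a usage sketch (hypothetical client code; predicates passed together
+// to Where are ANDed):
+//
+//	n, err := client.AuthRequest.Query().
+//		Where(
+//			authrequest.ClientIDEQ("example-app"),
+//			authrequest.ExpiryLT(time.Now()),
+//		).
+//		Count(ctx)
+//
+// The And, Or and Not combinators at the end of this file group predicates
+// explicitly, e.g. authrequest.Or(authrequest.StateEQ("a"), authrequest.StateEQ("b")).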
+func ClientIDContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldClientID, v)) +} + +// ScopesIsNil applies the IsNil predicate on the "scopes" field. +func ScopesIsNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIsNull(FieldScopes)) +} + +// ScopesNotNil applies the NotNil predicate on the "scopes" field. +func ScopesNotNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotNull(FieldScopes)) +} + +// ResponseTypesIsNil applies the IsNil predicate on the "response_types" field. +func ResponseTypesIsNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIsNull(FieldResponseTypes)) +} + +// ResponseTypesNotNil applies the NotNil predicate on the "response_types" field. +func ResponseTypesNotNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotNull(FieldResponseTypes)) +} + +// RedirectURIEQ applies the EQ predicate on the "redirect_uri" field. +func RedirectURIEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldRedirectURI, v)) +} + +// RedirectURINEQ applies the NEQ predicate on the "redirect_uri" field. +func RedirectURINEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldRedirectURI, v)) +} + +// RedirectURIIn applies the In predicate on the "redirect_uri" field. +func RedirectURIIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldRedirectURI, vs...)) +} + +// RedirectURINotIn applies the NotIn predicate on the "redirect_uri" field. +func RedirectURINotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldRedirectURI, vs...)) +} + +// RedirectURIGT applies the GT predicate on the "redirect_uri" field. +func RedirectURIGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldRedirectURI, v)) +} + +// RedirectURIGTE applies the GTE predicate on the "redirect_uri" field. +func RedirectURIGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldRedirectURI, v)) +} + +// RedirectURILT applies the LT predicate on the "redirect_uri" field. +func RedirectURILT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldRedirectURI, v)) +} + +// RedirectURILTE applies the LTE predicate on the "redirect_uri" field. +func RedirectURILTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldRedirectURI, v)) +} + +// RedirectURIContains applies the Contains predicate on the "redirect_uri" field. +func RedirectURIContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldRedirectURI, v)) +} + +// RedirectURIHasPrefix applies the HasPrefix predicate on the "redirect_uri" field. +func RedirectURIHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldRedirectURI, v)) +} + +// RedirectURIHasSuffix applies the HasSuffix predicate on the "redirect_uri" field. +func RedirectURIHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldRedirectURI, v)) +} + +// RedirectURIEqualFold applies the EqualFold predicate on the "redirect_uri" field. +func RedirectURIEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldRedirectURI, v)) +} + +// RedirectURIContainsFold applies the ContainsFold predicate on the "redirect_uri" field. 
+func RedirectURIContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldRedirectURI, v)) +} + +// NonceEQ applies the EQ predicate on the "nonce" field. +func NonceEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldNonce, v)) +} + +// NonceNEQ applies the NEQ predicate on the "nonce" field. +func NonceNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldNonce, v)) +} + +// NonceIn applies the In predicate on the "nonce" field. +func NonceIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldNonce, vs...)) +} + +// NonceNotIn applies the NotIn predicate on the "nonce" field. +func NonceNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldNonce, vs...)) +} + +// NonceGT applies the GT predicate on the "nonce" field. +func NonceGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldNonce, v)) +} + +// NonceGTE applies the GTE predicate on the "nonce" field. +func NonceGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldNonce, v)) +} + +// NonceLT applies the LT predicate on the "nonce" field. +func NonceLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldNonce, v)) +} + +// NonceLTE applies the LTE predicate on the "nonce" field. +func NonceLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldNonce, v)) +} + +// NonceContains applies the Contains predicate on the "nonce" field. +func NonceContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldNonce, v)) +} + +// NonceHasPrefix applies the HasPrefix predicate on the "nonce" field. +func NonceHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldNonce, v)) +} + +// NonceHasSuffix applies the HasSuffix predicate on the "nonce" field. +func NonceHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldNonce, v)) +} + +// NonceEqualFold applies the EqualFold predicate on the "nonce" field. +func NonceEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldNonce, v)) +} + +// NonceContainsFold applies the ContainsFold predicate on the "nonce" field. +func NonceContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldNonce, v)) +} + +// StateEQ applies the EQ predicate on the "state" field. +func StateEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldState, v)) +} + +// StateNEQ applies the NEQ predicate on the "state" field. +func StateNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldState, v)) +} + +// StateIn applies the In predicate on the "state" field. +func StateIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldState, vs...)) +} + +// StateNotIn applies the NotIn predicate on the "state" field. +func StateNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldState, vs...)) +} + +// StateGT applies the GT predicate on the "state" field. +func StateGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldState, v)) +} + +// StateGTE applies the GTE predicate on the "state" field. 
+func StateGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldState, v)) +} + +// StateLT applies the LT predicate on the "state" field. +func StateLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldState, v)) +} + +// StateLTE applies the LTE predicate on the "state" field. +func StateLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldState, v)) +} + +// StateContains applies the Contains predicate on the "state" field. +func StateContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldState, v)) +} + +// StateHasPrefix applies the HasPrefix predicate on the "state" field. +func StateHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldState, v)) +} + +// StateHasSuffix applies the HasSuffix predicate on the "state" field. +func StateHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldState, v)) +} + +// StateEqualFold applies the EqualFold predicate on the "state" field. +func StateEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldState, v)) +} + +// StateContainsFold applies the ContainsFold predicate on the "state" field. +func StateContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldState, v)) +} + +// ForceApprovalPromptEQ applies the EQ predicate on the "force_approval_prompt" field. +func ForceApprovalPromptEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldForceApprovalPrompt, v)) +} + +// ForceApprovalPromptNEQ applies the NEQ predicate on the "force_approval_prompt" field. +func ForceApprovalPromptNEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldForceApprovalPrompt, v)) +} + +// LoggedInEQ applies the EQ predicate on the "logged_in" field. +func LoggedInEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldLoggedIn, v)) +} + +// LoggedInNEQ applies the NEQ predicate on the "logged_in" field. +func LoggedInNEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldLoggedIn, v)) +} + +// ClaimsUserIDEQ applies the EQ predicate on the "claims_user_id" field. +func ClaimsUserIDEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDNEQ applies the NEQ predicate on the "claims_user_id" field. +func ClaimsUserIDNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDIn applies the In predicate on the "claims_user_id" field. +func ClaimsUserIDIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDNotIn applies the NotIn predicate on the "claims_user_id" field. +func ClaimsUserIDNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDGT applies the GT predicate on the "claims_user_id" field. +func ClaimsUserIDGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDGTE applies the GTE predicate on the "claims_user_id" field. 
+func ClaimsUserIDGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLT applies the LT predicate on the "claims_user_id" field. +func ClaimsUserIDLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLTE applies the LTE predicate on the "claims_user_id" field. +func ClaimsUserIDLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContains applies the Contains predicate on the "claims_user_id" field. +func ClaimsUserIDContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasPrefix applies the HasPrefix predicate on the "claims_user_id" field. +func ClaimsUserIDHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasSuffix applies the HasSuffix predicate on the "claims_user_id" field. +func ClaimsUserIDHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDEqualFold applies the EqualFold predicate on the "claims_user_id" field. +func ClaimsUserIDEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContainsFold applies the ContainsFold predicate on the "claims_user_id" field. +func ClaimsUserIDContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldClaimsUserID, v)) +} + +// ClaimsUsernameEQ applies the EQ predicate on the "claims_username" field. +func ClaimsUsernameEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameNEQ applies the NEQ predicate on the "claims_username" field. +func ClaimsUsernameNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameIn applies the In predicate on the "claims_username" field. +func ClaimsUsernameIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameNotIn applies the NotIn predicate on the "claims_username" field. +func ClaimsUsernameNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameGT applies the GT predicate on the "claims_username" field. +func ClaimsUsernameGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameGTE applies the GTE predicate on the "claims_username" field. +func ClaimsUsernameGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLT applies the LT predicate on the "claims_username" field. +func ClaimsUsernameLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLTE applies the LTE predicate on the "claims_username" field. +func ClaimsUsernameLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContains applies the Contains predicate on the "claims_username" field. 
+func ClaimsUsernameContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasPrefix applies the HasPrefix predicate on the "claims_username" field. +func ClaimsUsernameHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasSuffix applies the HasSuffix predicate on the "claims_username" field. +func ClaimsUsernameHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameEqualFold applies the EqualFold predicate on the "claims_username" field. +func ClaimsUsernameEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContainsFold applies the ContainsFold predicate on the "claims_username" field. +func ClaimsUsernameContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldClaimsUsername, v)) +} + +// ClaimsEmailEQ applies the EQ predicate on the "claims_email" field. +func ClaimsEmailEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailNEQ applies the NEQ predicate on the "claims_email" field. +func ClaimsEmailNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailIn applies the In predicate on the "claims_email" field. +func ClaimsEmailIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailNotIn applies the NotIn predicate on the "claims_email" field. +func ClaimsEmailNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailGT applies the GT predicate on the "claims_email" field. +func ClaimsEmailGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldClaimsEmail, v)) +} + +// ClaimsEmailGTE applies the GTE predicate on the "claims_email" field. +func ClaimsEmailGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailLT applies the LT predicate on the "claims_email" field. +func ClaimsEmailLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldClaimsEmail, v)) +} + +// ClaimsEmailLTE applies the LTE predicate on the "claims_email" field. +func ClaimsEmailLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailContains applies the Contains predicate on the "claims_email" field. +func ClaimsEmailContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasPrefix applies the HasPrefix predicate on the "claims_email" field. +func ClaimsEmailHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasSuffix applies the HasSuffix predicate on the "claims_email" field. +func ClaimsEmailHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldClaimsEmail, v)) +} + +// ClaimsEmailEqualFold applies the EqualFold predicate on the "claims_email" field. 
+func ClaimsEmailEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailContainsFold applies the ContainsFold predicate on the "claims_email" field. +func ClaimsEmailContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerifiedEQ applies the EQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsEmailVerifiedNEQ applies the NEQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedNEQ(v bool) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsGroupsIsNil applies the IsNil predicate on the "claims_groups" field. +func ClaimsGroupsIsNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIsNull(FieldClaimsGroups)) +} + +// ClaimsGroupsNotNil applies the NotNil predicate on the "claims_groups" field. +func ClaimsGroupsNotNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotNull(FieldClaimsGroups)) +} + +// ClaimsPreferredUsernameEQ applies the EQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameNEQ applies the NEQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameIn applies the In predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameNotIn applies the NotIn predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameGT applies the GT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameGTE applies the GTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLT applies the LT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLTE applies the LTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContains applies the Contains predicate on the "claims_preferred_username" field. 
+func ClaimsPreferredUsernameContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasPrefix applies the HasPrefix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasSuffix applies the HasSuffix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameEqualFold applies the EqualFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContainsFold applies the ContainsFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldClaimsPreferredUsername, v)) +} + +// ConnectorIDEQ applies the EQ predicate on the "connector_id" field. +func ConnectorIDEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorIDNEQ applies the NEQ predicate on the "connector_id" field. +func ConnectorIDNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldConnectorID, v)) +} + +// ConnectorIDIn applies the In predicate on the "connector_id" field. +func ConnectorIDIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldConnectorID, vs...)) +} + +// ConnectorIDNotIn applies the NotIn predicate on the "connector_id" field. +func ConnectorIDNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldConnectorID, vs...)) +} + +// ConnectorIDGT applies the GT predicate on the "connector_id" field. +func ConnectorIDGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldConnectorID, v)) +} + +// ConnectorIDGTE applies the GTE predicate on the "connector_id" field. +func ConnectorIDGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldConnectorID, v)) +} + +// ConnectorIDLT applies the LT predicate on the "connector_id" field. +func ConnectorIDLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldConnectorID, v)) +} + +// ConnectorIDLTE applies the LTE predicate on the "connector_id" field. +func ConnectorIDLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldConnectorID, v)) +} + +// ConnectorIDContains applies the Contains predicate on the "connector_id" field. +func ConnectorIDContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldConnectorID, v)) +} + +// ConnectorIDHasPrefix applies the HasPrefix predicate on the "connector_id" field. +func ConnectorIDHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldConnectorID, v)) +} + +// ConnectorIDHasSuffix applies the HasSuffix predicate on the "connector_id" field. 
+func ConnectorIDHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldConnectorID, v)) +} + +// ConnectorIDEqualFold applies the EqualFold predicate on the "connector_id" field. +func ConnectorIDEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldConnectorID, v)) +} + +// ConnectorIDContainsFold applies the ContainsFold predicate on the "connector_id" field. +func ConnectorIDContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldConnectorID, v)) +} + +// ConnectorDataEQ applies the EQ predicate on the "connector_data" field. +func ConnectorDataEQ(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldConnectorData, v)) +} + +// ConnectorDataNEQ applies the NEQ predicate on the "connector_data" field. +func ConnectorDataNEQ(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldConnectorData, v)) +} + +// ConnectorDataIn applies the In predicate on the "connector_data" field. +func ConnectorDataIn(vs ...[]byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldConnectorData, vs...)) +} + +// ConnectorDataNotIn applies the NotIn predicate on the "connector_data" field. +func ConnectorDataNotIn(vs ...[]byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldConnectorData, vs...)) +} + +// ConnectorDataGT applies the GT predicate on the "connector_data" field. +func ConnectorDataGT(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldConnectorData, v)) +} + +// ConnectorDataGTE applies the GTE predicate on the "connector_data" field. +func ConnectorDataGTE(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldConnectorData, v)) +} + +// ConnectorDataLT applies the LT predicate on the "connector_data" field. +func ConnectorDataLT(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldConnectorData, v)) +} + +// ConnectorDataLTE applies the LTE predicate on the "connector_data" field. +func ConnectorDataLTE(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldConnectorData, v)) +} + +// ConnectorDataIsNil applies the IsNil predicate on the "connector_data" field. +func ConnectorDataIsNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIsNull(FieldConnectorData)) +} + +// ConnectorDataNotNil applies the NotNil predicate on the "connector_data" field. +func ConnectorDataNotNil() predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotNull(FieldConnectorData)) +} + +// ExpiryEQ applies the EQ predicate on the "expiry" field. +func ExpiryEQ(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldExpiry, v)) +} + +// ExpiryNEQ applies the NEQ predicate on the "expiry" field. +func ExpiryNEQ(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldExpiry, v)) +} + +// ExpiryIn applies the In predicate on the "expiry" field. +func ExpiryIn(vs ...time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldExpiry, vs...)) +} + +// ExpiryNotIn applies the NotIn predicate on the "expiry" field. +func ExpiryNotIn(vs ...time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldExpiry, vs...)) +} + +// ExpiryGT applies the GT predicate on the "expiry" field. 
+func ExpiryGT(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldExpiry, v)) +} + +// ExpiryGTE applies the GTE predicate on the "expiry" field. +func ExpiryGTE(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldExpiry, v)) +} + +// ExpiryLT applies the LT predicate on the "expiry" field. +func ExpiryLT(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldExpiry, v)) +} + +// ExpiryLTE applies the LTE predicate on the "expiry" field. +func ExpiryLTE(v time.Time) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldExpiry, v)) +} + +// CodeChallengeEQ applies the EQ predicate on the "code_challenge" field. +func CodeChallengeEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeNEQ applies the NEQ predicate on the "code_challenge" field. +func CodeChallengeNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeIn applies the In predicate on the "code_challenge" field. +func CodeChallengeIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeNotIn applies the NotIn predicate on the "code_challenge" field. +func CodeChallengeNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeGT applies the GT predicate on the "code_challenge" field. +func CodeChallengeGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldCodeChallenge, v)) +} + +// CodeChallengeGTE applies the GTE predicate on the "code_challenge" field. +func CodeChallengeGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldCodeChallenge, v)) +} + +// CodeChallengeLT applies the LT predicate on the "code_challenge" field. +func CodeChallengeLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldCodeChallenge, v)) +} + +// CodeChallengeLTE applies the LTE predicate on the "code_challenge" field. +func CodeChallengeLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldCodeChallenge, v)) +} + +// CodeChallengeContains applies the Contains predicate on the "code_challenge" field. +func CodeChallengeContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldCodeChallenge, v)) +} + +// CodeChallengeHasPrefix applies the HasPrefix predicate on the "code_challenge" field. +func CodeChallengeHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldCodeChallenge, v)) +} + +// CodeChallengeHasSuffix applies the HasSuffix predicate on the "code_challenge" field. +func CodeChallengeHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldCodeChallenge, v)) +} + +// CodeChallengeEqualFold applies the EqualFold predicate on the "code_challenge" field. +func CodeChallengeEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldCodeChallenge, v)) +} + +// CodeChallengeContainsFold applies the ContainsFold predicate on the "code_challenge" field. 
+func CodeChallengeContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldCodeChallenge, v)) +} + +// CodeChallengeMethodEQ applies the EQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodNEQ applies the NEQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodNEQ(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodIn applies the In predicate on the "code_challenge_method" field. +func CodeChallengeMethodIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodNotIn applies the NotIn predicate on the "code_challenge_method" field. +func CodeChallengeMethodNotIn(vs ...string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodGT applies the GT predicate on the "code_challenge_method" field. +func CodeChallengeMethodGT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodGTE applies the GTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodGTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLT applies the LT predicate on the "code_challenge_method" field. +func CodeChallengeMethodLT(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLTE applies the LTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodLTE(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContains applies the Contains predicate on the "code_challenge_method" field. +func CodeChallengeMethodContains(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContains(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasPrefix applies the HasPrefix predicate on the "code_challenge_method" field. +func CodeChallengeMethodHasPrefix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasPrefix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasSuffix applies the HasSuffix predicate on the "code_challenge_method" field. +func CodeChallengeMethodHasSuffix(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldHasSuffix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodEqualFold applies the EqualFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodEqualFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEqualFold(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContainsFold applies the ContainsFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodContainsFold(v string) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldContainsFold(FieldCodeChallengeMethod, v)) +} + +// HmacKeyEQ applies the EQ predicate on the "hmac_key" field. 
+func HmacKeyEQ(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldEQ(FieldHmacKey, v)) +} + +// HmacKeyNEQ applies the NEQ predicate on the "hmac_key" field. +func HmacKeyNEQ(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNEQ(FieldHmacKey, v)) +} + +// HmacKeyIn applies the In predicate on the "hmac_key" field. +func HmacKeyIn(vs ...[]byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldIn(FieldHmacKey, vs...)) +} + +// HmacKeyNotIn applies the NotIn predicate on the "hmac_key" field. +func HmacKeyNotIn(vs ...[]byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldNotIn(FieldHmacKey, vs...)) +} + +// HmacKeyGT applies the GT predicate on the "hmac_key" field. +func HmacKeyGT(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGT(FieldHmacKey, v)) +} + +// HmacKeyGTE applies the GTE predicate on the "hmac_key" field. +func HmacKeyGTE(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldGTE(FieldHmacKey, v)) +} + +// HmacKeyLT applies the LT predicate on the "hmac_key" field. +func HmacKeyLT(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLT(FieldHmacKey, v)) +} + +// HmacKeyLTE applies the LTE predicate on the "hmac_key" field. +func HmacKeyLTE(v []byte) predicate.AuthRequest { + return predicate.AuthRequest(sql.FieldLTE(FieldHmacKey, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AuthRequest) predicate.AuthRequest { + return predicate.AuthRequest(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AuthRequest) predicate.AuthRequest { + return predicate.AuthRequest(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AuthRequest) predicate.AuthRequest { + return predicate.AuthRequest(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_create.go new file mode 100644 index 00000000..2c3d8f41 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_create.go @@ -0,0 +1,473 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authrequest" +) + +// AuthRequestCreate is the builder for creating a AuthRequest entity. +type AuthRequestCreate struct { + config + mutation *AuthRequestMutation + hooks []Hook +} + +// SetClientID sets the "client_id" field. +func (arc *AuthRequestCreate) SetClientID(s string) *AuthRequestCreate { + arc.mutation.SetClientID(s) + return arc +} + +// SetScopes sets the "scopes" field. +func (arc *AuthRequestCreate) SetScopes(s []string) *AuthRequestCreate { + arc.mutation.SetScopes(s) + return arc +} + +// SetResponseTypes sets the "response_types" field. +func (arc *AuthRequestCreate) SetResponseTypes(s []string) *AuthRequestCreate { + arc.mutation.SetResponseTypes(s) + return arc +} + +// SetRedirectURI sets the "redirect_uri" field. 
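The And, Or, and Not combinators above work by cloning the selector, folding the child predicates into a single expression, and attaching it as one WHERE group, which is what makes arbitrary nesting safe. A sketch of composing them (same assumptions as the previous example):

    // countStalePKCE counts expired requests that still carry a code challenge.
    func countStalePKCE(ctx context.Context, client *db.Client) (int, error) {
    	p := authrequest.And(
    		authrequest.ExpiryLT(time.Now()),
    		authrequest.Not(authrequest.CodeChallengeEQ("")), // challenge present
    	)
    	return client.AuthRequest.Query().Where(p).Count(ctx)
    }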
+func (arc *AuthRequestCreate) SetRedirectURI(s string) *AuthRequestCreate { + arc.mutation.SetRedirectURI(s) + return arc +} + +// SetNonce sets the "nonce" field. +func (arc *AuthRequestCreate) SetNonce(s string) *AuthRequestCreate { + arc.mutation.SetNonce(s) + return arc +} + +// SetState sets the "state" field. +func (arc *AuthRequestCreate) SetState(s string) *AuthRequestCreate { + arc.mutation.SetState(s) + return arc +} + +// SetForceApprovalPrompt sets the "force_approval_prompt" field. +func (arc *AuthRequestCreate) SetForceApprovalPrompt(b bool) *AuthRequestCreate { + arc.mutation.SetForceApprovalPrompt(b) + return arc +} + +// SetLoggedIn sets the "logged_in" field. +func (arc *AuthRequestCreate) SetLoggedIn(b bool) *AuthRequestCreate { + arc.mutation.SetLoggedIn(b) + return arc +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (arc *AuthRequestCreate) SetClaimsUserID(s string) *AuthRequestCreate { + arc.mutation.SetClaimsUserID(s) + return arc +} + +// SetClaimsUsername sets the "claims_username" field. +func (arc *AuthRequestCreate) SetClaimsUsername(s string) *AuthRequestCreate { + arc.mutation.SetClaimsUsername(s) + return arc +} + +// SetClaimsEmail sets the "claims_email" field. +func (arc *AuthRequestCreate) SetClaimsEmail(s string) *AuthRequestCreate { + arc.mutation.SetClaimsEmail(s) + return arc +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (arc *AuthRequestCreate) SetClaimsEmailVerified(b bool) *AuthRequestCreate { + arc.mutation.SetClaimsEmailVerified(b) + return arc +} + +// SetClaimsGroups sets the "claims_groups" field. +func (arc *AuthRequestCreate) SetClaimsGroups(s []string) *AuthRequestCreate { + arc.mutation.SetClaimsGroups(s) + return arc +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (arc *AuthRequestCreate) SetClaimsPreferredUsername(s string) *AuthRequestCreate { + arc.mutation.SetClaimsPreferredUsername(s) + return arc +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (arc *AuthRequestCreate) SetNillableClaimsPreferredUsername(s *string) *AuthRequestCreate { + if s != nil { + arc.SetClaimsPreferredUsername(*s) + } + return arc +} + +// SetConnectorID sets the "connector_id" field. +func (arc *AuthRequestCreate) SetConnectorID(s string) *AuthRequestCreate { + arc.mutation.SetConnectorID(s) + return arc +} + +// SetConnectorData sets the "connector_data" field. +func (arc *AuthRequestCreate) SetConnectorData(b []byte) *AuthRequestCreate { + arc.mutation.SetConnectorData(b) + return arc +} + +// SetExpiry sets the "expiry" field. +func (arc *AuthRequestCreate) SetExpiry(t time.Time) *AuthRequestCreate { + arc.mutation.SetExpiry(t) + return arc +} + +// SetCodeChallenge sets the "code_challenge" field. +func (arc *AuthRequestCreate) SetCodeChallenge(s string) *AuthRequestCreate { + arc.mutation.SetCodeChallenge(s) + return arc +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (arc *AuthRequestCreate) SetNillableCodeChallenge(s *string) *AuthRequestCreate { + if s != nil { + arc.SetCodeChallenge(*s) + } + return arc +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (arc *AuthRequestCreate) SetCodeChallengeMethod(s string) *AuthRequestCreate { + arc.mutation.SetCodeChallengeMethod(s) + return arc +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. 
+func (arc *AuthRequestCreate) SetNillableCodeChallengeMethod(s *string) *AuthRequestCreate { + if s != nil { + arc.SetCodeChallengeMethod(*s) + } + return arc +} + +// SetHmacKey sets the "hmac_key" field. +func (arc *AuthRequestCreate) SetHmacKey(b []byte) *AuthRequestCreate { + arc.mutation.SetHmacKey(b) + return arc +} + +// SetID sets the "id" field. +func (arc *AuthRequestCreate) SetID(s string) *AuthRequestCreate { + arc.mutation.SetID(s) + return arc +} + +// Mutation returns the AuthRequestMutation object of the builder. +func (arc *AuthRequestCreate) Mutation() *AuthRequestMutation { + return arc.mutation +} + +// Save creates the AuthRequest in the database. +func (arc *AuthRequestCreate) Save(ctx context.Context) (*AuthRequest, error) { + arc.defaults() + return withHooks(ctx, arc.sqlSave, arc.mutation, arc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (arc *AuthRequestCreate) SaveX(ctx context.Context) *AuthRequest { + v, err := arc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (arc *AuthRequestCreate) Exec(ctx context.Context) error { + _, err := arc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (arc *AuthRequestCreate) ExecX(ctx context.Context) { + if err := arc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (arc *AuthRequestCreate) defaults() { + if _, ok := arc.mutation.ClaimsPreferredUsername(); !ok { + v := authrequest.DefaultClaimsPreferredUsername + arc.mutation.SetClaimsPreferredUsername(v) + } + if _, ok := arc.mutation.CodeChallenge(); !ok { + v := authrequest.DefaultCodeChallenge + arc.mutation.SetCodeChallenge(v) + } + if _, ok := arc.mutation.CodeChallengeMethod(); !ok { + v := authrequest.DefaultCodeChallengeMethod + arc.mutation.SetCodeChallengeMethod(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
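The defaults method above is why code_challenge, code_challenge_method, and claims_preferred_username never trip the required-field validation that follows: Save backfills them from the schema defaults first. The SetNillable* setters pair with this by writing only when the pointer is non-nil. A brief sketch (the Create accessor on the generated client is assumed; the ID is hypothetical):

    // challenge may be nil, e.g. when the client sent no PKCE parameters.
    func newRequest(client *db.Client, challenge *string) *db.AuthRequestCreate {
    	// The remaining required fields must still be set before Save.
    	return client.AuthRequest.Create().
    		SetID("hypothetical-id").
    		SetNillableCodeChallenge(challenge) // no-op when nil; the schema default applies
    }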
+func (arc *AuthRequestCreate) check() error { + if _, ok := arc.mutation.ClientID(); !ok { + return &ValidationError{Name: "client_id", err: errors.New(`db: missing required field "AuthRequest.client_id"`)} + } + if _, ok := arc.mutation.RedirectURI(); !ok { + return &ValidationError{Name: "redirect_uri", err: errors.New(`db: missing required field "AuthRequest.redirect_uri"`)} + } + if _, ok := arc.mutation.Nonce(); !ok { + return &ValidationError{Name: "nonce", err: errors.New(`db: missing required field "AuthRequest.nonce"`)} + } + if _, ok := arc.mutation.State(); !ok { + return &ValidationError{Name: "state", err: errors.New(`db: missing required field "AuthRequest.state"`)} + } + if _, ok := arc.mutation.ForceApprovalPrompt(); !ok { + return &ValidationError{Name: "force_approval_prompt", err: errors.New(`db: missing required field "AuthRequest.force_approval_prompt"`)} + } + if _, ok := arc.mutation.LoggedIn(); !ok { + return &ValidationError{Name: "logged_in", err: errors.New(`db: missing required field "AuthRequest.logged_in"`)} + } + if _, ok := arc.mutation.ClaimsUserID(); !ok { + return &ValidationError{Name: "claims_user_id", err: errors.New(`db: missing required field "AuthRequest.claims_user_id"`)} + } + if _, ok := arc.mutation.ClaimsUsername(); !ok { + return &ValidationError{Name: "claims_username", err: errors.New(`db: missing required field "AuthRequest.claims_username"`)} + } + if _, ok := arc.mutation.ClaimsEmail(); !ok { + return &ValidationError{Name: "claims_email", err: errors.New(`db: missing required field "AuthRequest.claims_email"`)} + } + if _, ok := arc.mutation.ClaimsEmailVerified(); !ok { + return &ValidationError{Name: "claims_email_verified", err: errors.New(`db: missing required field "AuthRequest.claims_email_verified"`)} + } + if _, ok := arc.mutation.ClaimsPreferredUsername(); !ok { + return &ValidationError{Name: "claims_preferred_username", err: errors.New(`db: missing required field "AuthRequest.claims_preferred_username"`)} + } + if _, ok := arc.mutation.ConnectorID(); !ok { + return &ValidationError{Name: "connector_id", err: errors.New(`db: missing required field "AuthRequest.connector_id"`)} + } + if _, ok := arc.mutation.Expiry(); !ok { + return &ValidationError{Name: "expiry", err: errors.New(`db: missing required field "AuthRequest.expiry"`)} + } + if _, ok := arc.mutation.CodeChallenge(); !ok { + return &ValidationError{Name: "code_challenge", err: errors.New(`db: missing required field "AuthRequest.code_challenge"`)} + } + if _, ok := arc.mutation.CodeChallengeMethod(); !ok { + return &ValidationError{Name: "code_challenge_method", err: errors.New(`db: missing required field "AuthRequest.code_challenge_method"`)} + } + if _, ok := arc.mutation.HmacKey(); !ok { + return &ValidationError{Name: "hmac_key", err: errors.New(`db: missing required field "AuthRequest.hmac_key"`)} + } + if v, ok := arc.mutation.ID(); ok { + if err := authrequest.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "AuthRequest.id": %w`, err)} + } + } + return nil +} + +func (arc *AuthRequestCreate) sqlSave(ctx context.Context) (*AuthRequest, error) { + if err := arc.check(); err != nil { + return nil, err + } + _node, _spec := arc.createSpec() + if err := sqlgraph.CreateNode(ctx, arc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { 
+ _node.ID = id + } else { + return nil, fmt.Errorf("unexpected AuthRequest.ID type: %T", _spec.ID.Value) + } + } + arc.mutation.id = &_node.ID + arc.mutation.done = true + return _node, nil +} + +func (arc *AuthRequestCreate) createSpec() (*AuthRequest, *sqlgraph.CreateSpec) { + var ( + _node = &AuthRequest{config: arc.config} + _spec = sqlgraph.NewCreateSpec(authrequest.Table, sqlgraph.NewFieldSpec(authrequest.FieldID, field.TypeString)) + ) + if id, ok := arc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := arc.mutation.ClientID(); ok { + _spec.SetField(authrequest.FieldClientID, field.TypeString, value) + _node.ClientID = value + } + if value, ok := arc.mutation.Scopes(); ok { + _spec.SetField(authrequest.FieldScopes, field.TypeJSON, value) + _node.Scopes = value + } + if value, ok := arc.mutation.ResponseTypes(); ok { + _spec.SetField(authrequest.FieldResponseTypes, field.TypeJSON, value) + _node.ResponseTypes = value + } + if value, ok := arc.mutation.RedirectURI(); ok { + _spec.SetField(authrequest.FieldRedirectURI, field.TypeString, value) + _node.RedirectURI = value + } + if value, ok := arc.mutation.Nonce(); ok { + _spec.SetField(authrequest.FieldNonce, field.TypeString, value) + _node.Nonce = value + } + if value, ok := arc.mutation.State(); ok { + _spec.SetField(authrequest.FieldState, field.TypeString, value) + _node.State = value + } + if value, ok := arc.mutation.ForceApprovalPrompt(); ok { + _spec.SetField(authrequest.FieldForceApprovalPrompt, field.TypeBool, value) + _node.ForceApprovalPrompt = value + } + if value, ok := arc.mutation.LoggedIn(); ok { + _spec.SetField(authrequest.FieldLoggedIn, field.TypeBool, value) + _node.LoggedIn = value + } + if value, ok := arc.mutation.ClaimsUserID(); ok { + _spec.SetField(authrequest.FieldClaimsUserID, field.TypeString, value) + _node.ClaimsUserID = value + } + if value, ok := arc.mutation.ClaimsUsername(); ok { + _spec.SetField(authrequest.FieldClaimsUsername, field.TypeString, value) + _node.ClaimsUsername = value + } + if value, ok := arc.mutation.ClaimsEmail(); ok { + _spec.SetField(authrequest.FieldClaimsEmail, field.TypeString, value) + _node.ClaimsEmail = value + } + if value, ok := arc.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authrequest.FieldClaimsEmailVerified, field.TypeBool, value) + _node.ClaimsEmailVerified = value + } + if value, ok := arc.mutation.ClaimsGroups(); ok { + _spec.SetField(authrequest.FieldClaimsGroups, field.TypeJSON, value) + _node.ClaimsGroups = value + } + if value, ok := arc.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authrequest.FieldClaimsPreferredUsername, field.TypeString, value) + _node.ClaimsPreferredUsername = value + } + if value, ok := arc.mutation.ConnectorID(); ok { + _spec.SetField(authrequest.FieldConnectorID, field.TypeString, value) + _node.ConnectorID = value + } + if value, ok := arc.mutation.ConnectorData(); ok { + _spec.SetField(authrequest.FieldConnectorData, field.TypeBytes, value) + _node.ConnectorData = &value + } + if value, ok := arc.mutation.Expiry(); ok { + _spec.SetField(authrequest.FieldExpiry, field.TypeTime, value) + _node.Expiry = value + } + if value, ok := arc.mutation.CodeChallenge(); ok { + _spec.SetField(authrequest.FieldCodeChallenge, field.TypeString, value) + _node.CodeChallenge = value + } + if value, ok := arc.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authrequest.FieldCodeChallengeMethod, field.TypeString, value) + _node.CodeChallengeMethod = value + } + if value, ok := 
arc.mutation.HmacKey(); ok { + _spec.SetField(authrequest.FieldHmacKey, field.TypeBytes, value) + _node.HmacKey = value + } + return _node, _spec +} + +// AuthRequestCreateBulk is the builder for creating many AuthRequest entities in bulk. +type AuthRequestCreateBulk struct { + config + builders []*AuthRequestCreate +} + +// Save creates the AuthRequest entities in the database. +func (arcb *AuthRequestCreateBulk) Save(ctx context.Context) ([]*AuthRequest, error) { + specs := make([]*sqlgraph.CreateSpec, len(arcb.builders)) + nodes := make([]*AuthRequest, len(arcb.builders)) + mutators := make([]Mutator, len(arcb.builders)) + for i := range arcb.builders { + func(i int, root context.Context) { + builder := arcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AuthRequestMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, arcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, arcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, arcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (arcb *AuthRequestCreateBulk) SaveX(ctx context.Context) []*AuthRequest { + v, err := arcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (arcb *AuthRequestCreateBulk) Exec(ctx context.Context) error { + _, err := arcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (arcb *AuthRequestCreateBulk) ExecX(ctx context.Context) { + if err := arcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_delete.go new file mode 100644 index 00000000..0cef693a --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthRequestDelete is the builder for deleting a AuthRequest entity. +type AuthRequestDelete struct { + config + hooks []Hook + mutation *AuthRequestMutation +} + +// Where appends a list predicates to the AuthRequestDelete builder. +func (ard *AuthRequestDelete) Where(ps ...predicate.AuthRequest) *AuthRequestDelete { + ard.mutation.Where(ps...) + return ard +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
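The bulk Save above chains one mutator per builder and issues a single sqlgraph.BatchCreate for all collected specs, so per-entity hooks still run while the INSERT itself is batched. A sketch of driving it (the CreateBulk accessor on the generated client is assumed; only the builder type appears in this hunk):

    func createMany(ctx context.Context, client *db.Client, ids []string) ([]*db.AuthRequest, error) {
    	builders := make([]*db.AuthRequestCreate, len(ids))
    	for i, id := range ids {
    		// Illustrative only: check() rejects each builder until all
    		// required fields (client_id, redirect_uri, and so on) are set.
    		builders[i] = client.AuthRequest.Create().SetID(id)
    	}
    	return client.AuthRequest.CreateBulk(builders...).Save(ctx)
    }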
+func (ard *AuthRequestDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ard.sqlExec, ard.mutation, ard.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ard *AuthRequestDelete) ExecX(ctx context.Context) int { + n, err := ard.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ard *AuthRequestDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(authrequest.Table, sqlgraph.NewFieldSpec(authrequest.FieldID, field.TypeString)) + if ps := ard.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ard.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ard.mutation.done = true + return affected, err +} + +// AuthRequestDeleteOne is the builder for deleting a single AuthRequest entity. +type AuthRequestDeleteOne struct { + ard *AuthRequestDelete +} + +// Where appends a list predicates to the AuthRequestDelete builder. +func (ardo *AuthRequestDeleteOne) Where(ps ...predicate.AuthRequest) *AuthRequestDeleteOne { + ardo.ard.mutation.Where(ps...) + return ardo +} + +// Exec executes the deletion query. +func (ardo *AuthRequestDeleteOne) Exec(ctx context.Context) error { + n, err := ardo.ard.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{authrequest.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ardo *AuthRequestDeleteOne) ExecX(ctx context.Context) { + if err := ardo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_query.go new file mode 100644 index 00000000..3ffbf788 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthRequestQuery is the builder for querying AuthRequest entities. +type AuthRequestQuery struct { + config + ctx *QueryContext + order []authrequest.OrderOption + inters []Interceptor + predicates []predicate.AuthRequest + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AuthRequestQuery builder. +func (arq *AuthRequestQuery) Where(ps ...predicate.AuthRequest) *AuthRequestQuery { + arq.predicates = append(arq.predicates, ps...) + return arq +} + +// Limit the number of records to be returned by this query. +func (arq *AuthRequestQuery) Limit(limit int) *AuthRequestQuery { + arq.ctx.Limit = &limit + return arq +} + +// Offset to start from. +func (arq *AuthRequestQuery) Offset(offset int) *AuthRequestQuery { + arq.ctx.Offset = &offset + return arq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (arq *AuthRequestQuery) Unique(unique bool) *AuthRequestQuery { + arq.ctx.Unique = &unique + return arq +} + +// Order specifies how the records should be ordered. 
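Exec on AuthRequestDelete reports how many rows were removed, while DeleteOne turns an affected count of zero into a *NotFoundError. A sketch of a typical expiry sweep (the Delete accessor on the generated client is assumed):

    // purgeExpired removes auth requests whose expiry has passed and
    // returns the number of rows deleted.
    func purgeExpired(ctx context.Context, client *db.Client) (int, error) {
    	return client.AuthRequest.Delete().
    		Where(authrequest.ExpiryLT(time.Now())).
    		Exec(ctx)
    }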
+func (arq *AuthRequestQuery) Order(o ...authrequest.OrderOption) *AuthRequestQuery { + arq.order = append(arq.order, o...) + return arq +} + +// First returns the first AuthRequest entity from the query. +// Returns a *NotFoundError when no AuthRequest was found. +func (arq *AuthRequestQuery) First(ctx context.Context) (*AuthRequest, error) { + nodes, err := arq.Limit(1).All(setContextOp(ctx, arq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{authrequest.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (arq *AuthRequestQuery) FirstX(ctx context.Context) *AuthRequest { + node, err := arq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AuthRequest ID from the query. +// Returns a *NotFoundError when no AuthRequest ID was found. +func (arq *AuthRequestQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = arq.Limit(1).IDs(setContextOp(ctx, arq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{authrequest.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (arq *AuthRequestQuery) FirstIDX(ctx context.Context) string { + id, err := arq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AuthRequest entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AuthRequest entity is found. +// Returns a *NotFoundError when no AuthRequest entities are found. +func (arq *AuthRequestQuery) Only(ctx context.Context) (*AuthRequest, error) { + nodes, err := arq.Limit(2).All(setContextOp(ctx, arq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{authrequest.Label} + default: + return nil, &NotSingularError{authrequest.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (arq *AuthRequestQuery) OnlyX(ctx context.Context) *AuthRequest { + node, err := arq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AuthRequest ID in the query. +// Returns a *NotSingularError when more than one AuthRequest ID is found. +// Returns a *NotFoundError when no entities are found. +func (arq *AuthRequestQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = arq.Limit(2).IDs(setContextOp(ctx, arq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{authrequest.Label} + default: + err = &NotSingularError{authrequest.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (arq *AuthRequestQuery) OnlyIDX(ctx context.Context) string { + id, err := arq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AuthRequests. +func (arq *AuthRequestQuery) All(ctx context.Context) ([]*AuthRequest, error) { + ctx = setContextOp(ctx, arq.ctx, "All") + if err := arq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AuthRequest, *AuthRequestQuery]() + return withInterceptors[[]*AuthRequest](ctx, arq, qr, arq.inters) +} + +// AllX is like All, but panics if an error occurs. 
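First and Only encode different intents: First queries with LIMIT 1 and returns *NotFoundError on an empty result, while Only queries with LIMIT 2 so it can also return *NotSingularError when a second row exists. A sketch of handling both (IsNotSingular is assumed from the generated runtime alongside IsNotFound, which this hunk already uses):

    func lookupByChallenge(ctx context.Context, client *db.Client, ch string) (*db.AuthRequest, error) {
    	req, err := client.AuthRequest.Query().
    		Where(authrequest.CodeChallengeEQ(ch)).
    		Only(ctx)
    	switch {
    	case db.IsNotFound(err):
    		return nil, nil // no matching request
    	case db.IsNotSingular(err):
    		return nil, err // predicate matched more than one row
    	default:
    		return req, err
    	}
    }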
+func (arq *AuthRequestQuery) AllX(ctx context.Context) []*AuthRequest { + nodes, err := arq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AuthRequest IDs. +func (arq *AuthRequestQuery) IDs(ctx context.Context) (ids []string, err error) { + if arq.ctx.Unique == nil && arq.path != nil { + arq.Unique(true) + } + ctx = setContextOp(ctx, arq.ctx, "IDs") + if err = arq.Select(authrequest.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (arq *AuthRequestQuery) IDsX(ctx context.Context) []string { + ids, err := arq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (arq *AuthRequestQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, arq.ctx, "Count") + if err := arq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, arq, querierCount[*AuthRequestQuery](), arq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (arq *AuthRequestQuery) CountX(ctx context.Context) int { + count, err := arq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (arq *AuthRequestQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, arq.ctx, "Exist") + switch _, err := arq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (arq *AuthRequestQuery) ExistX(ctx context.Context) bool { + exist, err := arq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AuthRequestQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (arq *AuthRequestQuery) Clone() *AuthRequestQuery { + if arq == nil { + return nil + } + return &AuthRequestQuery{ + config: arq.config, + ctx: arq.ctx.Clone(), + order: append([]authrequest.OrderOption{}, arq.order...), + inters: append([]Interceptor{}, arq.inters...), + predicates: append([]predicate.AuthRequest{}, arq.predicates...), + // clone intermediate query. + sql: arq.sql.Clone(), + path: arq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AuthRequest.Query(). +// GroupBy(authrequest.FieldClientID). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (arq *AuthRequestQuery) GroupBy(field string, fields ...string) *AuthRequestGroupBy { + arq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AuthRequestGroupBy{build: arq} + grbuild.flds = &arq.ctx.Fields + grbuild.label = authrequest.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// } +// +// client.AuthRequest.Query(). +// Select(authrequest.FieldClientID). 
+// Scan(ctx, &v) +func (arq *AuthRequestQuery) Select(fields ...string) *AuthRequestSelect { + arq.ctx.Fields = append(arq.ctx.Fields, fields...) + sbuild := &AuthRequestSelect{AuthRequestQuery: arq} + sbuild.label = authrequest.Label + sbuild.flds, sbuild.scan = &arq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AuthRequestSelect configured with the given aggregations. +func (arq *AuthRequestQuery) Aggregate(fns ...AggregateFunc) *AuthRequestSelect { + return arq.Select().Aggregate(fns...) +} + +func (arq *AuthRequestQuery) prepareQuery(ctx context.Context) error { + for _, inter := range arq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, arq); err != nil { + return err + } + } + } + for _, f := range arq.ctx.Fields { + if !authrequest.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if arq.path != nil { + prev, err := arq.path(ctx) + if err != nil { + return err + } + arq.sql = prev + } + return nil +} + +func (arq *AuthRequestQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthRequest, error) { + var ( + nodes = []*AuthRequest{} + _spec = arq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AuthRequest).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AuthRequest{config: arq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, arq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (arq *AuthRequestQuery) sqlCount(ctx context.Context) (int, error) { + _spec := arq.querySpec() + _spec.Node.Columns = arq.ctx.Fields + if len(arq.ctx.Fields) > 0 { + _spec.Unique = arq.ctx.Unique != nil && *arq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, arq.driver, _spec) +} + +func (arq *AuthRequestQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(authrequest.Table, authrequest.Columns, sqlgraph.NewFieldSpec(authrequest.FieldID, field.TypeString)) + _spec.From = arq.sql + if unique := arq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if arq.path != nil { + _spec.Unique = true + } + if fields := arq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authrequest.FieldID) + for i := range fields { + if fields[i] != authrequest.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := arq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := arq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := arq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := arq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (arq *AuthRequestQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(arq.driver.Dialect()) + t1 := builder.Table(authrequest.Table) + columns := arq.ctx.Fields + if len(columns) == 0 { + columns = authrequest.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + 
if arq.sql != nil { + selector = arq.sql + selector.Select(selector.Columns(columns...)...) + } + if arq.ctx.Unique != nil && *arq.ctx.Unique { + selector.Distinct() + } + for _, p := range arq.predicates { + p(selector) + } + for _, p := range arq.order { + p(selector) + } + if offset := arq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := arq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AuthRequestGroupBy is the group-by builder for AuthRequest entities. +type AuthRequestGroupBy struct { + selector + build *AuthRequestQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (argb *AuthRequestGroupBy) Aggregate(fns ...AggregateFunc) *AuthRequestGroupBy { + argb.fns = append(argb.fns, fns...) + return argb +} + +// Scan applies the selector query and scans the result into the given value. +func (argb *AuthRequestGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, argb.build.ctx, "GroupBy") + if err := argb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthRequestQuery, *AuthRequestGroupBy](ctx, argb.build, argb, argb.build.inters, v) +} + +func (argb *AuthRequestGroupBy) sqlScan(ctx context.Context, root *AuthRequestQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(argb.fns)) + for _, fn := range argb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*argb.flds)+len(argb.fns)) + for _, f := range *argb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*argb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := argb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AuthRequestSelect is the builder for selecting fields of AuthRequest entities. +type AuthRequestSelect struct { + *AuthRequestQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ars *AuthRequestSelect) Aggregate(fns ...AggregateFunc) *AuthRequestSelect { + ars.fns = append(ars.fns, fns...) + return ars +} + +// Scan applies the selector query and scans the result into the given value. +func (ars *AuthRequestSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ars.ctx, "Select") + if err := ars.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AuthRequestQuery, *AuthRequestSelect](ctx, ars.AuthRequestQuery, ars, ars.inters, v) +} + +func (ars *AuthRequestSelect) sqlScan(ctx context.Context, root *AuthRequestQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ars.fns)) + for _, fn := range ars.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ars.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ars.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_update.go new file mode 100644 index 00000000..2fb3c0cb --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/authrequest_update.go @@ -0,0 +1,723 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// AuthRequestUpdate is the builder for updating AuthRequest entities. +type AuthRequestUpdate struct { + config + hooks []Hook + mutation *AuthRequestMutation +} + +// Where appends a list predicates to the AuthRequestUpdate builder. +func (aru *AuthRequestUpdate) Where(ps ...predicate.AuthRequest) *AuthRequestUpdate { + aru.mutation.Where(ps...) + return aru +} + +// SetClientID sets the "client_id" field. +func (aru *AuthRequestUpdate) SetClientID(s string) *AuthRequestUpdate { + aru.mutation.SetClientID(s) + return aru +} + +// SetScopes sets the "scopes" field. +func (aru *AuthRequestUpdate) SetScopes(s []string) *AuthRequestUpdate { + aru.mutation.SetScopes(s) + return aru +} + +// AppendScopes appends s to the "scopes" field. +func (aru *AuthRequestUpdate) AppendScopes(s []string) *AuthRequestUpdate { + aru.mutation.AppendScopes(s) + return aru +} + +// ClearScopes clears the value of the "scopes" field. +func (aru *AuthRequestUpdate) ClearScopes() *AuthRequestUpdate { + aru.mutation.ClearScopes() + return aru +} + +// SetResponseTypes sets the "response_types" field. +func (aru *AuthRequestUpdate) SetResponseTypes(s []string) *AuthRequestUpdate { + aru.mutation.SetResponseTypes(s) + return aru +} + +// AppendResponseTypes appends s to the "response_types" field. +func (aru *AuthRequestUpdate) AppendResponseTypes(s []string) *AuthRequestUpdate { + aru.mutation.AppendResponseTypes(s) + return aru +} + +// ClearResponseTypes clears the value of the "response_types" field. +func (aru *AuthRequestUpdate) ClearResponseTypes() *AuthRequestUpdate { + aru.mutation.ClearResponseTypes() + return aru +} + +// SetRedirectURI sets the "redirect_uri" field. +func (aru *AuthRequestUpdate) SetRedirectURI(s string) *AuthRequestUpdate { + aru.mutation.SetRedirectURI(s) + return aru +} + +// SetNonce sets the "nonce" field. +func (aru *AuthRequestUpdate) SetNonce(s string) *AuthRequestUpdate { + aru.mutation.SetNonce(s) + return aru +} + +// SetState sets the "state" field. +func (aru *AuthRequestUpdate) SetState(s string) *AuthRequestUpdate { + aru.mutation.SetState(s) + return aru +} + +// SetForceApprovalPrompt sets the "force_approval_prompt" field. +func (aru *AuthRequestUpdate) SetForceApprovalPrompt(b bool) *AuthRequestUpdate { + aru.mutation.SetForceApprovalPrompt(b) + return aru +} + +// SetLoggedIn sets the "logged_in" field. +func (aru *AuthRequestUpdate) SetLoggedIn(b bool) *AuthRequestUpdate { + aru.mutation.SetLoggedIn(b) + return aru +} + +// SetClaimsUserID sets the "claims_user_id" field. 
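The Append* setters above are later applied in sqlSave as sqljson.Append modifiers rather than whole-column writes, so appending to scopes mutates the stored JSON array in place. A sketch of a predicate-scoped update (the Update accessor on the generated client is assumed; values are illustrative):

    // grantOffline appends a scope to every matching request; Save
    // reports the number of rows touched.
    func grantOffline(ctx context.Context, client *db.Client) (int, error) {
    	return client.AuthRequest.Update().
    		Where(authrequest.CodeChallengeMethodEQ("plain")).
    		AppendScopes([]string{"offline_access"}). // JSON append, not overwrite
    		SetLoggedIn(true).
    		Save(ctx)
    }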
+func (aru *AuthRequestUpdate) SetClaimsUserID(s string) *AuthRequestUpdate { + aru.mutation.SetClaimsUserID(s) + return aru +} + +// SetClaimsUsername sets the "claims_username" field. +func (aru *AuthRequestUpdate) SetClaimsUsername(s string) *AuthRequestUpdate { + aru.mutation.SetClaimsUsername(s) + return aru +} + +// SetClaimsEmail sets the "claims_email" field. +func (aru *AuthRequestUpdate) SetClaimsEmail(s string) *AuthRequestUpdate { + aru.mutation.SetClaimsEmail(s) + return aru +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (aru *AuthRequestUpdate) SetClaimsEmailVerified(b bool) *AuthRequestUpdate { + aru.mutation.SetClaimsEmailVerified(b) + return aru +} + +// SetClaimsGroups sets the "claims_groups" field. +func (aru *AuthRequestUpdate) SetClaimsGroups(s []string) *AuthRequestUpdate { + aru.mutation.SetClaimsGroups(s) + return aru +} + +// AppendClaimsGroups appends s to the "claims_groups" field. +func (aru *AuthRequestUpdate) AppendClaimsGroups(s []string) *AuthRequestUpdate { + aru.mutation.AppendClaimsGroups(s) + return aru +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (aru *AuthRequestUpdate) ClearClaimsGroups() *AuthRequestUpdate { + aru.mutation.ClearClaimsGroups() + return aru +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (aru *AuthRequestUpdate) SetClaimsPreferredUsername(s string) *AuthRequestUpdate { + aru.mutation.SetClaimsPreferredUsername(s) + return aru +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (aru *AuthRequestUpdate) SetNillableClaimsPreferredUsername(s *string) *AuthRequestUpdate { + if s != nil { + aru.SetClaimsPreferredUsername(*s) + } + return aru +} + +// SetConnectorID sets the "connector_id" field. +func (aru *AuthRequestUpdate) SetConnectorID(s string) *AuthRequestUpdate { + aru.mutation.SetConnectorID(s) + return aru +} + +// SetConnectorData sets the "connector_data" field. +func (aru *AuthRequestUpdate) SetConnectorData(b []byte) *AuthRequestUpdate { + aru.mutation.SetConnectorData(b) + return aru +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (aru *AuthRequestUpdate) ClearConnectorData() *AuthRequestUpdate { + aru.mutation.ClearConnectorData() + return aru +} + +// SetExpiry sets the "expiry" field. +func (aru *AuthRequestUpdate) SetExpiry(t time.Time) *AuthRequestUpdate { + aru.mutation.SetExpiry(t) + return aru +} + +// SetCodeChallenge sets the "code_challenge" field. +func (aru *AuthRequestUpdate) SetCodeChallenge(s string) *AuthRequestUpdate { + aru.mutation.SetCodeChallenge(s) + return aru +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (aru *AuthRequestUpdate) SetNillableCodeChallenge(s *string) *AuthRequestUpdate { + if s != nil { + aru.SetCodeChallenge(*s) + } + return aru +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (aru *AuthRequestUpdate) SetCodeChallengeMethod(s string) *AuthRequestUpdate { + aru.mutation.SetCodeChallengeMethod(s) + return aru +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (aru *AuthRequestUpdate) SetNillableCodeChallengeMethod(s *string) *AuthRequestUpdate { + if s != nil { + aru.SetCodeChallengeMethod(*s) + } + return aru +} + +// SetHmacKey sets the "hmac_key" field. 
+func (aru *AuthRequestUpdate) SetHmacKey(b []byte) *AuthRequestUpdate { + aru.mutation.SetHmacKey(b) + return aru +} + +// Mutation returns the AuthRequestMutation object of the builder. +func (aru *AuthRequestUpdate) Mutation() *AuthRequestMutation { + return aru.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (aru *AuthRequestUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, aru.sqlSave, aru.mutation, aru.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (aru *AuthRequestUpdate) SaveX(ctx context.Context) int { + affected, err := aru.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (aru *AuthRequestUpdate) Exec(ctx context.Context) error { + _, err := aru.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (aru *AuthRequestUpdate) ExecX(ctx context.Context) { + if err := aru.Exec(ctx); err != nil { + panic(err) + } +} + +func (aru *AuthRequestUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(authrequest.Table, authrequest.Columns, sqlgraph.NewFieldSpec(authrequest.FieldID, field.TypeString)) + if ps := aru.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := aru.mutation.ClientID(); ok { + _spec.SetField(authrequest.FieldClientID, field.TypeString, value) + } + if value, ok := aru.mutation.Scopes(); ok { + _spec.SetField(authrequest.FieldScopes, field.TypeJSON, value) + } + if value, ok := aru.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldScopes, value) + }) + } + if aru.mutation.ScopesCleared() { + _spec.ClearField(authrequest.FieldScopes, field.TypeJSON) + } + if value, ok := aru.mutation.ResponseTypes(); ok { + _spec.SetField(authrequest.FieldResponseTypes, field.TypeJSON, value) + } + if value, ok := aru.mutation.AppendedResponseTypes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldResponseTypes, value) + }) + } + if aru.mutation.ResponseTypesCleared() { + _spec.ClearField(authrequest.FieldResponseTypes, field.TypeJSON) + } + if value, ok := aru.mutation.RedirectURI(); ok { + _spec.SetField(authrequest.FieldRedirectURI, field.TypeString, value) + } + if value, ok := aru.mutation.Nonce(); ok { + _spec.SetField(authrequest.FieldNonce, field.TypeString, value) + } + if value, ok := aru.mutation.State(); ok { + _spec.SetField(authrequest.FieldState, field.TypeString, value) + } + if value, ok := aru.mutation.ForceApprovalPrompt(); ok { + _spec.SetField(authrequest.FieldForceApprovalPrompt, field.TypeBool, value) + } + if value, ok := aru.mutation.LoggedIn(); ok { + _spec.SetField(authrequest.FieldLoggedIn, field.TypeBool, value) + } + if value, ok := aru.mutation.ClaimsUserID(); ok { + _spec.SetField(authrequest.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := aru.mutation.ClaimsUsername(); ok { + _spec.SetField(authrequest.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := aru.mutation.ClaimsEmail(); ok { + _spec.SetField(authrequest.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := aru.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authrequest.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := aru.mutation.ClaimsGroups(); ok { + 
_spec.SetField(authrequest.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := aru.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldClaimsGroups, value) + }) + } + if aru.mutation.ClaimsGroupsCleared() { + _spec.ClearField(authrequest.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := aru.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authrequest.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := aru.mutation.ConnectorID(); ok { + _spec.SetField(authrequest.FieldConnectorID, field.TypeString, value) + } + if value, ok := aru.mutation.ConnectorData(); ok { + _spec.SetField(authrequest.FieldConnectorData, field.TypeBytes, value) + } + if aru.mutation.ConnectorDataCleared() { + _spec.ClearField(authrequest.FieldConnectorData, field.TypeBytes) + } + if value, ok := aru.mutation.Expiry(); ok { + _spec.SetField(authrequest.FieldExpiry, field.TypeTime, value) + } + if value, ok := aru.mutation.CodeChallenge(); ok { + _spec.SetField(authrequest.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := aru.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authrequest.FieldCodeChallengeMethod, field.TypeString, value) + } + if value, ok := aru.mutation.HmacKey(); ok { + _spec.SetField(authrequest.FieldHmacKey, field.TypeBytes, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, aru.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{authrequest.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + aru.mutation.done = true + return n, nil +} + +// AuthRequestUpdateOne is the builder for updating a single AuthRequest entity. +type AuthRequestUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AuthRequestMutation +} + +// SetClientID sets the "client_id" field. +func (aruo *AuthRequestUpdateOne) SetClientID(s string) *AuthRequestUpdateOne { + aruo.mutation.SetClientID(s) + return aruo +} + +// SetScopes sets the "scopes" field. +func (aruo *AuthRequestUpdateOne) SetScopes(s []string) *AuthRequestUpdateOne { + aruo.mutation.SetScopes(s) + return aruo +} + +// AppendScopes appends s to the "scopes" field. +func (aruo *AuthRequestUpdateOne) AppendScopes(s []string) *AuthRequestUpdateOne { + aruo.mutation.AppendScopes(s) + return aruo +} + +// ClearScopes clears the value of the "scopes" field. +func (aruo *AuthRequestUpdateOne) ClearScopes() *AuthRequestUpdateOne { + aruo.mutation.ClearScopes() + return aruo +} + +// SetResponseTypes sets the "response_types" field. +func (aruo *AuthRequestUpdateOne) SetResponseTypes(s []string) *AuthRequestUpdateOne { + aruo.mutation.SetResponseTypes(s) + return aruo +} + +// AppendResponseTypes appends s to the "response_types" field. +func (aruo *AuthRequestUpdateOne) AppendResponseTypes(s []string) *AuthRequestUpdateOne { + aruo.mutation.AppendResponseTypes(s) + return aruo +} + +// ClearResponseTypes clears the value of the "response_types" field. +func (aruo *AuthRequestUpdateOne) ClearResponseTypes() *AuthRequestUpdateOne { + aruo.mutation.ClearResponseTypes() + return aruo +} + +// SetRedirectURI sets the "redirect_uri" field. +func (aruo *AuthRequestUpdateOne) SetRedirectURI(s string) *AuthRequestUpdateOne { + aruo.mutation.SetRedirectURI(s) + return aruo +} + +// SetNonce sets the "nonce" field. 
+func (aruo *AuthRequestUpdateOne) SetNonce(s string) *AuthRequestUpdateOne { + aruo.mutation.SetNonce(s) + return aruo +} + +// SetState sets the "state" field. +func (aruo *AuthRequestUpdateOne) SetState(s string) *AuthRequestUpdateOne { + aruo.mutation.SetState(s) + return aruo +} + +// SetForceApprovalPrompt sets the "force_approval_prompt" field. +func (aruo *AuthRequestUpdateOne) SetForceApprovalPrompt(b bool) *AuthRequestUpdateOne { + aruo.mutation.SetForceApprovalPrompt(b) + return aruo +} + +// SetLoggedIn sets the "logged_in" field. +func (aruo *AuthRequestUpdateOne) SetLoggedIn(b bool) *AuthRequestUpdateOne { + aruo.mutation.SetLoggedIn(b) + return aruo +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (aruo *AuthRequestUpdateOne) SetClaimsUserID(s string) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsUserID(s) + return aruo +} + +// SetClaimsUsername sets the "claims_username" field. +func (aruo *AuthRequestUpdateOne) SetClaimsUsername(s string) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsUsername(s) + return aruo +} + +// SetClaimsEmail sets the "claims_email" field. +func (aruo *AuthRequestUpdateOne) SetClaimsEmail(s string) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsEmail(s) + return aruo +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (aruo *AuthRequestUpdateOne) SetClaimsEmailVerified(b bool) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsEmailVerified(b) + return aruo +} + +// SetClaimsGroups sets the "claims_groups" field. +func (aruo *AuthRequestUpdateOne) SetClaimsGroups(s []string) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsGroups(s) + return aruo +} + +// AppendClaimsGroups appends s to the "claims_groups" field. +func (aruo *AuthRequestUpdateOne) AppendClaimsGroups(s []string) *AuthRequestUpdateOne { + aruo.mutation.AppendClaimsGroups(s) + return aruo +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (aruo *AuthRequestUpdateOne) ClearClaimsGroups() *AuthRequestUpdateOne { + aruo.mutation.ClearClaimsGroups() + return aruo +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (aruo *AuthRequestUpdateOne) SetClaimsPreferredUsername(s string) *AuthRequestUpdateOne { + aruo.mutation.SetClaimsPreferredUsername(s) + return aruo +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (aruo *AuthRequestUpdateOne) SetNillableClaimsPreferredUsername(s *string) *AuthRequestUpdateOne { + if s != nil { + aruo.SetClaimsPreferredUsername(*s) + } + return aruo +} + +// SetConnectorID sets the "connector_id" field. +func (aruo *AuthRequestUpdateOne) SetConnectorID(s string) *AuthRequestUpdateOne { + aruo.mutation.SetConnectorID(s) + return aruo +} + +// SetConnectorData sets the "connector_data" field. +func (aruo *AuthRequestUpdateOne) SetConnectorData(b []byte) *AuthRequestUpdateOne { + aruo.mutation.SetConnectorData(b) + return aruo +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (aruo *AuthRequestUpdateOne) ClearConnectorData() *AuthRequestUpdateOne { + aruo.mutation.ClearConnectorData() + return aruo +} + +// SetExpiry sets the "expiry" field. +func (aruo *AuthRequestUpdateOne) SetExpiry(t time.Time) *AuthRequestUpdateOne { + aruo.mutation.SetExpiry(t) + return aruo +} + +// SetCodeChallenge sets the "code_challenge" field. 
+func (aruo *AuthRequestUpdateOne) SetCodeChallenge(s string) *AuthRequestUpdateOne { + aruo.mutation.SetCodeChallenge(s) + return aruo +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (aruo *AuthRequestUpdateOne) SetNillableCodeChallenge(s *string) *AuthRequestUpdateOne { + if s != nil { + aruo.SetCodeChallenge(*s) + } + return aruo +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (aruo *AuthRequestUpdateOne) SetCodeChallengeMethod(s string) *AuthRequestUpdateOne { + aruo.mutation.SetCodeChallengeMethod(s) + return aruo +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (aruo *AuthRequestUpdateOne) SetNillableCodeChallengeMethod(s *string) *AuthRequestUpdateOne { + if s != nil { + aruo.SetCodeChallengeMethod(*s) + } + return aruo +} + +// SetHmacKey sets the "hmac_key" field. +func (aruo *AuthRequestUpdateOne) SetHmacKey(b []byte) *AuthRequestUpdateOne { + aruo.mutation.SetHmacKey(b) + return aruo +} + +// Mutation returns the AuthRequestMutation object of the builder. +func (aruo *AuthRequestUpdateOne) Mutation() *AuthRequestMutation { + return aruo.mutation +} + +// Where appends a list predicates to the AuthRequestUpdate builder. +func (aruo *AuthRequestUpdateOne) Where(ps ...predicate.AuthRequest) *AuthRequestUpdateOne { + aruo.mutation.Where(ps...) + return aruo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (aruo *AuthRequestUpdateOne) Select(field string, fields ...string) *AuthRequestUpdateOne { + aruo.fields = append([]string{field}, fields...) + return aruo +} + +// Save executes the query and returns the updated AuthRequest entity. +func (aruo *AuthRequestUpdateOne) Save(ctx context.Context) (*AuthRequest, error) { + return withHooks(ctx, aruo.sqlSave, aruo.mutation, aruo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (aruo *AuthRequestUpdateOne) SaveX(ctx context.Context) *AuthRequest { + node, err := aruo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (aruo *AuthRequestUpdateOne) Exec(ctx context.Context) error { + _, err := aruo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
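Unlike the plural builder, AuthRequestUpdateOne returns the updated entity from Save, and its Select above narrows which columns are scanned back into it. A sketch (UpdateOneID is the usual generated entry point for this builder and is assumed here; the ID is hypothetical):

    func markLoggedIn(ctx context.Context, client *db.Client, id string) (*db.AuthRequest, error) {
    	// Save returns *NotFoundError if no row has this ID.
    	return client.AuthRequest.UpdateOneID(id).
    		SetLoggedIn(true).
    		Save(ctx)
    }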
+func (aruo *AuthRequestUpdateOne) ExecX(ctx context.Context) { + if err := aruo.Exec(ctx); err != nil { + panic(err) + } +} + +func (aruo *AuthRequestUpdateOne) sqlSave(ctx context.Context) (_node *AuthRequest, err error) { + _spec := sqlgraph.NewUpdateSpec(authrequest.Table, authrequest.Columns, sqlgraph.NewFieldSpec(authrequest.FieldID, field.TypeString)) + id, ok := aruo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "AuthRequest.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := aruo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, authrequest.FieldID) + for _, f := range fields { + if !authrequest.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != authrequest.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := aruo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := aruo.mutation.ClientID(); ok { + _spec.SetField(authrequest.FieldClientID, field.TypeString, value) + } + if value, ok := aruo.mutation.Scopes(); ok { + _spec.SetField(authrequest.FieldScopes, field.TypeJSON, value) + } + if value, ok := aruo.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldScopes, value) + }) + } + if aruo.mutation.ScopesCleared() { + _spec.ClearField(authrequest.FieldScopes, field.TypeJSON) + } + if value, ok := aruo.mutation.ResponseTypes(); ok { + _spec.SetField(authrequest.FieldResponseTypes, field.TypeJSON, value) + } + if value, ok := aruo.mutation.AppendedResponseTypes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldResponseTypes, value) + }) + } + if aruo.mutation.ResponseTypesCleared() { + _spec.ClearField(authrequest.FieldResponseTypes, field.TypeJSON) + } + if value, ok := aruo.mutation.RedirectURI(); ok { + _spec.SetField(authrequest.FieldRedirectURI, field.TypeString, value) + } + if value, ok := aruo.mutation.Nonce(); ok { + _spec.SetField(authrequest.FieldNonce, field.TypeString, value) + } + if value, ok := aruo.mutation.State(); ok { + _spec.SetField(authrequest.FieldState, field.TypeString, value) + } + if value, ok := aruo.mutation.ForceApprovalPrompt(); ok { + _spec.SetField(authrequest.FieldForceApprovalPrompt, field.TypeBool, value) + } + if value, ok := aruo.mutation.LoggedIn(); ok { + _spec.SetField(authrequest.FieldLoggedIn, field.TypeBool, value) + } + if value, ok := aruo.mutation.ClaimsUserID(); ok { + _spec.SetField(authrequest.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := aruo.mutation.ClaimsUsername(); ok { + _spec.SetField(authrequest.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := aruo.mutation.ClaimsEmail(); ok { + _spec.SetField(authrequest.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := aruo.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(authrequest.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := aruo.mutation.ClaimsGroups(); ok { + _spec.SetField(authrequest.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := aruo.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, authrequest.FieldClaimsGroups, value) + }) + } + if aruo.mutation.ClaimsGroupsCleared() { + 
_spec.ClearField(authrequest.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := aruo.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(authrequest.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := aruo.mutation.ConnectorID(); ok { + _spec.SetField(authrequest.FieldConnectorID, field.TypeString, value) + } + if value, ok := aruo.mutation.ConnectorData(); ok { + _spec.SetField(authrequest.FieldConnectorData, field.TypeBytes, value) + } + if aruo.mutation.ConnectorDataCleared() { + _spec.ClearField(authrequest.FieldConnectorData, field.TypeBytes) + } + if value, ok := aruo.mutation.Expiry(); ok { + _spec.SetField(authrequest.FieldExpiry, field.TypeTime, value) + } + if value, ok := aruo.mutation.CodeChallenge(); ok { + _spec.SetField(authrequest.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := aruo.mutation.CodeChallengeMethod(); ok { + _spec.SetField(authrequest.FieldCodeChallengeMethod, field.TypeString, value) + } + if value, ok := aruo.mutation.HmacKey(); ok { + _spec.SetField(authrequest.FieldHmacKey, field.TypeBytes, value) + } + _node = &AuthRequest{config: aruo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, aruo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{authrequest.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + aruo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/client.go b/vendor/github.com/dexidp/dex/storage/ent/db/client.go new file mode 100644 index 00000000..ddbab826 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/client.go @@ -0,0 +1,1462 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/dexidp/dex/storage/ent/db/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/authcode" + "github.com/dexidp/dex/storage/ent/db/authrequest" + "github.com/dexidp/dex/storage/ent/db/connector" + "github.com/dexidp/dex/storage/ent/db/devicerequest" + "github.com/dexidp/dex/storage/ent/db/devicetoken" + "github.com/dexidp/dex/storage/ent/db/keys" + "github.com/dexidp/dex/storage/ent/db/oauth2client" + "github.com/dexidp/dex/storage/ent/db/offlinesession" + "github.com/dexidp/dex/storage/ent/db/password" + "github.com/dexidp/dex/storage/ent/db/refreshtoken" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // AuthCode is the client for interacting with the AuthCode builders. + AuthCode *AuthCodeClient + // AuthRequest is the client for interacting with the AuthRequest builders. + AuthRequest *AuthRequestClient + // Connector is the client for interacting with the Connector builders. + Connector *ConnectorClient + // DeviceRequest is the client for interacting with the DeviceRequest builders. + DeviceRequest *DeviceRequestClient + // DeviceToken is the client for interacting with the DeviceToken builders. + DeviceToken *DeviceTokenClient + // Keys is the client for interacting with the Keys builders. + Keys *KeysClient + // OAuth2Client is the client for interacting with the OAuth2Client builders. 
+ OAuth2Client *OAuth2ClientClient + // OfflineSession is the client for interacting with the OfflineSession builders. + OfflineSession *OfflineSessionClient + // Password is the client for interacting with the Password builders. + Password *PasswordClient + // RefreshToken is the client for interacting with the RefreshToken builders. + RefreshToken *RefreshTokenClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + client := &Client{config: cfg} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.AuthCode = NewAuthCodeClient(c.config) + c.AuthRequest = NewAuthRequestClient(c.config) + c.Connector = NewConnectorClient(c.config) + c.DeviceRequest = NewDeviceRequestClient(c.config) + c.DeviceToken = NewDeviceTokenClient(c.config) + c.Keys = NewKeysClient(c.config) + c.OAuth2Client = NewOAuth2ClientClient(c.config) + c.OfflineSession = NewOfflineSessionClient(c.config) + c.Password = NewPasswordClient(c.config) + c.RefreshToken = NewRefreshTokenClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. 
+func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("db: cannot start a transaction within a transaction") + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("db: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + AuthCode: NewAuthCodeClient(cfg), + AuthRequest: NewAuthRequestClient(cfg), + Connector: NewConnectorClient(cfg), + DeviceRequest: NewDeviceRequestClient(cfg), + DeviceToken: NewDeviceTokenClient(cfg), + Keys: NewKeysClient(cfg), + OAuth2Client: NewOAuth2ClientClient(cfg), + OfflineSession: NewOfflineSessionClient(cfg), + Password: NewPasswordClient(cfg), + RefreshToken: NewRefreshTokenClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + AuthCode: NewAuthCodeClient(cfg), + AuthRequest: NewAuthRequestClient(cfg), + Connector: NewConnectorClient(cfg), + DeviceRequest: NewDeviceRequestClient(cfg), + DeviceToken: NewDeviceTokenClient(cfg), + Keys: NewKeysClient(cfg), + OAuth2Client: NewOAuth2ClientClient(cfg), + OfflineSession: NewOfflineSessionClient(cfg), + Password: NewPasswordClient(cfg), + RefreshToken: NewRefreshTokenClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// AuthCode. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys, + c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.AuthCode, c.AuthRequest, c.Connector, c.DeviceRequest, c.DeviceToken, c.Keys, + c.OAuth2Client, c.OfflineSession, c.Password, c.RefreshToken, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. 
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AuthCodeMutation: + return c.AuthCode.mutate(ctx, m) + case *AuthRequestMutation: + return c.AuthRequest.mutate(ctx, m) + case *ConnectorMutation: + return c.Connector.mutate(ctx, m) + case *DeviceRequestMutation: + return c.DeviceRequest.mutate(ctx, m) + case *DeviceTokenMutation: + return c.DeviceToken.mutate(ctx, m) + case *KeysMutation: + return c.Keys.mutate(ctx, m) + case *OAuth2ClientMutation: + return c.OAuth2Client.mutate(ctx, m) + case *OfflineSessionMutation: + return c.OfflineSession.mutate(ctx, m) + case *PasswordMutation: + return c.Password.mutate(ctx, m) + case *RefreshTokenMutation: + return c.RefreshToken.mutate(ctx, m) + default: + return nil, fmt.Errorf("db: unknown mutation type %T", m) + } +} + +// AuthCodeClient is a client for the AuthCode schema. +type AuthCodeClient struct { + config +} + +// NewAuthCodeClient returns a client for the AuthCode from the given config. +func NewAuthCodeClient(c config) *AuthCodeClient { + return &AuthCodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `authcode.Hooks(f(g(h())))`. +func (c *AuthCodeClient) Use(hooks ...Hook) { + c.hooks.AuthCode = append(c.hooks.AuthCode, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `authcode.Intercept(f(g(h())))`. +func (c *AuthCodeClient) Intercept(interceptors ...Interceptor) { + c.inters.AuthCode = append(c.inters.AuthCode, interceptors...) +} + +// Create returns a builder for creating a AuthCode entity. +func (c *AuthCodeClient) Create() *AuthCodeCreate { + mutation := newAuthCodeMutation(c.config, OpCreate) + return &AuthCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AuthCode entities. +func (c *AuthCodeClient) CreateBulk(builders ...*AuthCodeCreate) *AuthCodeCreateBulk { + return &AuthCodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AuthCode. +func (c *AuthCodeClient) Update() *AuthCodeUpdate { + mutation := newAuthCodeMutation(c.config, OpUpdate) + return &AuthCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AuthCodeClient) UpdateOne(ac *AuthCode) *AuthCodeUpdateOne { + mutation := newAuthCodeMutation(c.config, OpUpdateOne, withAuthCode(ac)) + return &AuthCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AuthCodeClient) UpdateOneID(id string) *AuthCodeUpdateOne { + mutation := newAuthCodeMutation(c.config, OpUpdateOne, withAuthCodeID(id)) + return &AuthCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AuthCode. +func (c *AuthCodeClient) Delete() *AuthCodeDelete { + mutation := newAuthCodeMutation(c.config, OpDelete) + return &AuthCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AuthCodeClient) DeleteOne(ac *AuthCode) *AuthCodeDeleteOne { + return c.DeleteOneID(ac.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *AuthCodeClient) DeleteOneID(id string) *AuthCodeDeleteOne { + builder := c.Delete().Where(authcode.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AuthCodeDeleteOne{builder} +} + +// Query returns a query builder for AuthCode. +func (c *AuthCodeClient) Query() *AuthCodeQuery { + return &AuthCodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAuthCode}, + inters: c.Interceptors(), + } +} + +// Get returns a AuthCode entity by its id. +func (c *AuthCodeClient) Get(ctx context.Context, id string) (*AuthCode, error) { + return c.Query().Where(authcode.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AuthCodeClient) GetX(ctx context.Context, id string) *AuthCode { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *AuthCodeClient) Hooks() []Hook { + return c.hooks.AuthCode +} + +// Interceptors returns the client interceptors. +func (c *AuthCodeClient) Interceptors() []Interceptor { + return c.inters.AuthCode +} + +func (c *AuthCodeClient) mutate(ctx context.Context, m *AuthCodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AuthCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AuthCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AuthCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AuthCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown AuthCode mutation op: %q", m.Op()) + } +} + +// AuthRequestClient is a client for the AuthRequest schema. +type AuthRequestClient struct { + config +} + +// NewAuthRequestClient returns a client for the AuthRequest from the given config. +func NewAuthRequestClient(c config) *AuthRequestClient { + return &AuthRequestClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `authrequest.Hooks(f(g(h())))`. +func (c *AuthRequestClient) Use(hooks ...Hook) { + c.hooks.AuthRequest = append(c.hooks.AuthRequest, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `authrequest.Intercept(f(g(h())))`. +func (c *AuthRequestClient) Intercept(interceptors ...Interceptor) { + c.inters.AuthRequest = append(c.inters.AuthRequest, interceptors...) +} + +// Create returns a builder for creating a AuthRequest entity. +func (c *AuthRequestClient) Create() *AuthRequestCreate { + mutation := newAuthRequestMutation(c.config, OpCreate) + return &AuthRequestCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AuthRequest entities. +func (c *AuthRequestClient) CreateBulk(builders ...*AuthRequestCreate) *AuthRequestCreateBulk { + return &AuthRequestCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AuthRequest. +func (c *AuthRequestClient) Update() *AuthRequestUpdate { + mutation := newAuthRequestMutation(c.config, OpUpdate) + return &AuthRequestUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *AuthRequestClient) UpdateOne(ar *AuthRequest) *AuthRequestUpdateOne { + mutation := newAuthRequestMutation(c.config, OpUpdateOne, withAuthRequest(ar)) + return &AuthRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AuthRequestClient) UpdateOneID(id string) *AuthRequestUpdateOne { + mutation := newAuthRequestMutation(c.config, OpUpdateOne, withAuthRequestID(id)) + return &AuthRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AuthRequest. +func (c *AuthRequestClient) Delete() *AuthRequestDelete { + mutation := newAuthRequestMutation(c.config, OpDelete) + return &AuthRequestDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AuthRequestClient) DeleteOne(ar *AuthRequest) *AuthRequestDeleteOne { + return c.DeleteOneID(ar.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AuthRequestClient) DeleteOneID(id string) *AuthRequestDeleteOne { + builder := c.Delete().Where(authrequest.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AuthRequestDeleteOne{builder} +} + +// Query returns a query builder for AuthRequest. +func (c *AuthRequestClient) Query() *AuthRequestQuery { + return &AuthRequestQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAuthRequest}, + inters: c.Interceptors(), + } +} + +// Get returns a AuthRequest entity by its id. +func (c *AuthRequestClient) Get(ctx context.Context, id string) (*AuthRequest, error) { + return c.Query().Where(authrequest.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AuthRequestClient) GetX(ctx context.Context, id string) *AuthRequest { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *AuthRequestClient) Hooks() []Hook { + return c.hooks.AuthRequest +} + +// Interceptors returns the client interceptors. +func (c *AuthRequestClient) Interceptors() []Interceptor { + return c.inters.AuthRequest +} + +func (c *AuthRequestClient) mutate(ctx context.Context, m *AuthRequestMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AuthRequestCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AuthRequestUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AuthRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AuthRequestDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown AuthRequest mutation op: %q", m.Op()) + } +} + +// ConnectorClient is a client for the Connector schema. +type ConnectorClient struct { + config +} + +// NewConnectorClient returns a client for the Connector from the given config. +func NewConnectorClient(c config) *ConnectorClient { + return &ConnectorClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `connector.Hooks(f(g(h())))`. +func (c *ConnectorClient) Use(hooks ...Hook) { + c.hooks.Connector = append(c.hooks.Connector, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `connector.Intercept(f(g(h())))`. 
+func (c *ConnectorClient) Intercept(interceptors ...Interceptor) { + c.inters.Connector = append(c.inters.Connector, interceptors...) +} + +// Create returns a builder for creating a Connector entity. +func (c *ConnectorClient) Create() *ConnectorCreate { + mutation := newConnectorMutation(c.config, OpCreate) + return &ConnectorCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Connector entities. +func (c *ConnectorClient) CreateBulk(builders ...*ConnectorCreate) *ConnectorCreateBulk { + return &ConnectorCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Connector. +func (c *ConnectorClient) Update() *ConnectorUpdate { + mutation := newConnectorMutation(c.config, OpUpdate) + return &ConnectorUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ConnectorClient) UpdateOne(co *Connector) *ConnectorUpdateOne { + mutation := newConnectorMutation(c.config, OpUpdateOne, withConnector(co)) + return &ConnectorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ConnectorClient) UpdateOneID(id string) *ConnectorUpdateOne { + mutation := newConnectorMutation(c.config, OpUpdateOne, withConnectorID(id)) + return &ConnectorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Connector. +func (c *ConnectorClient) Delete() *ConnectorDelete { + mutation := newConnectorMutation(c.config, OpDelete) + return &ConnectorDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ConnectorClient) DeleteOne(co *Connector) *ConnectorDeleteOne { + return c.DeleteOneID(co.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ConnectorClient) DeleteOneID(id string) *ConnectorDeleteOne { + builder := c.Delete().Where(connector.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ConnectorDeleteOne{builder} +} + +// Query returns a query builder for Connector. +func (c *ConnectorClient) Query() *ConnectorQuery { + return &ConnectorQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeConnector}, + inters: c.Interceptors(), + } +} + +// Get returns a Connector entity by its id. +func (c *ConnectorClient) Get(ctx context.Context, id string) (*Connector, error) { + return c.Query().Where(connector.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ConnectorClient) GetX(ctx context.Context, id string) *Connector { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *ConnectorClient) Hooks() []Hook { + return c.hooks.Connector +} + +// Interceptors returns the client interceptors. 
+func (c *ConnectorClient) Interceptors() []Interceptor { + return c.inters.Connector +} + +func (c *ConnectorClient) mutate(ctx context.Context, m *ConnectorMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ConnectorCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ConnectorUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ConnectorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ConnectorDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown Connector mutation op: %q", m.Op()) + } +} + +// DeviceRequestClient is a client for the DeviceRequest schema. +type DeviceRequestClient struct { + config +} + +// NewDeviceRequestClient returns a client for the DeviceRequest from the given config. +func NewDeviceRequestClient(c config) *DeviceRequestClient { + return &DeviceRequestClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `devicerequest.Hooks(f(g(h())))`. +func (c *DeviceRequestClient) Use(hooks ...Hook) { + c.hooks.DeviceRequest = append(c.hooks.DeviceRequest, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `devicerequest.Intercept(f(g(h())))`. +func (c *DeviceRequestClient) Intercept(interceptors ...Interceptor) { + c.inters.DeviceRequest = append(c.inters.DeviceRequest, interceptors...) +} + +// Create returns a builder for creating a DeviceRequest entity. +func (c *DeviceRequestClient) Create() *DeviceRequestCreate { + mutation := newDeviceRequestMutation(c.config, OpCreate) + return &DeviceRequestCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of DeviceRequest entities. +func (c *DeviceRequestClient) CreateBulk(builders ...*DeviceRequestCreate) *DeviceRequestCreateBulk { + return &DeviceRequestCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for DeviceRequest. +func (c *DeviceRequestClient) Update() *DeviceRequestUpdate { + mutation := newDeviceRequestMutation(c.config, OpUpdate) + return &DeviceRequestUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DeviceRequestClient) UpdateOne(dr *DeviceRequest) *DeviceRequestUpdateOne { + mutation := newDeviceRequestMutation(c.config, OpUpdateOne, withDeviceRequest(dr)) + return &DeviceRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DeviceRequestClient) UpdateOneID(id int) *DeviceRequestUpdateOne { + mutation := newDeviceRequestMutation(c.config, OpUpdateOne, withDeviceRequestID(id)) + return &DeviceRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for DeviceRequest. +func (c *DeviceRequestClient) Delete() *DeviceRequestDelete { + mutation := newDeviceRequestMutation(c.config, OpDelete) + return &DeviceRequestDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *DeviceRequestClient) DeleteOne(dr *DeviceRequest) *DeviceRequestDeleteOne { + return c.DeleteOneID(dr.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *DeviceRequestClient) DeleteOneID(id int) *DeviceRequestDeleteOne { + builder := c.Delete().Where(devicerequest.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DeviceRequestDeleteOne{builder} +} + +// Query returns a query builder for DeviceRequest. +func (c *DeviceRequestClient) Query() *DeviceRequestQuery { + return &DeviceRequestQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeDeviceRequest}, + inters: c.Interceptors(), + } +} + +// Get returns a DeviceRequest entity by its id. +func (c *DeviceRequestClient) Get(ctx context.Context, id int) (*DeviceRequest, error) { + return c.Query().Where(devicerequest.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DeviceRequestClient) GetX(ctx context.Context, id int) *DeviceRequest { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *DeviceRequestClient) Hooks() []Hook { + return c.hooks.DeviceRequest +} + +// Interceptors returns the client interceptors. +func (c *DeviceRequestClient) Interceptors() []Interceptor { + return c.inters.DeviceRequest +} + +func (c *DeviceRequestClient) mutate(ctx context.Context, m *DeviceRequestMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DeviceRequestCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DeviceRequestUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DeviceRequestUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DeviceRequestDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown DeviceRequest mutation op: %q", m.Op()) + } +} + +// DeviceTokenClient is a client for the DeviceToken schema. +type DeviceTokenClient struct { + config +} + +// NewDeviceTokenClient returns a client for the DeviceToken from the given config. +func NewDeviceTokenClient(c config) *DeviceTokenClient { + return &DeviceTokenClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `devicetoken.Hooks(f(g(h())))`. +func (c *DeviceTokenClient) Use(hooks ...Hook) { + c.hooks.DeviceToken = append(c.hooks.DeviceToken, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `devicetoken.Intercept(f(g(h())))`. +func (c *DeviceTokenClient) Intercept(interceptors ...Interceptor) { + c.inters.DeviceToken = append(c.inters.DeviceToken, interceptors...) +} + +// Create returns a builder for creating a DeviceToken entity. +func (c *DeviceTokenClient) Create() *DeviceTokenCreate { + mutation := newDeviceTokenMutation(c.config, OpCreate) + return &DeviceTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of DeviceToken entities. +func (c *DeviceTokenClient) CreateBulk(builders ...*DeviceTokenCreate) *DeviceTokenCreateBulk { + return &DeviceTokenCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for DeviceToken. 
+func (c *DeviceTokenClient) Update() *DeviceTokenUpdate { + mutation := newDeviceTokenMutation(c.config, OpUpdate) + return &DeviceTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *DeviceTokenClient) UpdateOne(dt *DeviceToken) *DeviceTokenUpdateOne { + mutation := newDeviceTokenMutation(c.config, OpUpdateOne, withDeviceToken(dt)) + return &DeviceTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *DeviceTokenClient) UpdateOneID(id int) *DeviceTokenUpdateOne { + mutation := newDeviceTokenMutation(c.config, OpUpdateOne, withDeviceTokenID(id)) + return &DeviceTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for DeviceToken. +func (c *DeviceTokenClient) Delete() *DeviceTokenDelete { + mutation := newDeviceTokenMutation(c.config, OpDelete) + return &DeviceTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *DeviceTokenClient) DeleteOne(dt *DeviceToken) *DeviceTokenDeleteOne { + return c.DeleteOneID(dt.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *DeviceTokenClient) DeleteOneID(id int) *DeviceTokenDeleteOne { + builder := c.Delete().Where(devicetoken.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &DeviceTokenDeleteOne{builder} +} + +// Query returns a query builder for DeviceToken. +func (c *DeviceTokenClient) Query() *DeviceTokenQuery { + return &DeviceTokenQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeDeviceToken}, + inters: c.Interceptors(), + } +} + +// Get returns a DeviceToken entity by its id. +func (c *DeviceTokenClient) Get(ctx context.Context, id int) (*DeviceToken, error) { + return c.Query().Where(devicetoken.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *DeviceTokenClient) GetX(ctx context.Context, id int) *DeviceToken { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *DeviceTokenClient) Hooks() []Hook { + return c.hooks.DeviceToken +} + +// Interceptors returns the client interceptors. +func (c *DeviceTokenClient) Interceptors() []Interceptor { + return c.inters.DeviceToken +} + +func (c *DeviceTokenClient) mutate(ctx context.Context, m *DeviceTokenMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DeviceTokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DeviceTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DeviceTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DeviceTokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown DeviceToken mutation op: %q", m.Op()) + } +} + +// KeysClient is a client for the Keys schema. +type KeysClient struct { + config +} + +// NewKeysClient returns a client for the Keys from the given config. +func NewKeysClient(c config) *KeysClient { + return &KeysClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `keys.Hooks(f(g(h())))`. 
+func (c *KeysClient) Use(hooks ...Hook) { + c.hooks.Keys = append(c.hooks.Keys, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `keys.Intercept(f(g(h())))`. +func (c *KeysClient) Intercept(interceptors ...Interceptor) { + c.inters.Keys = append(c.inters.Keys, interceptors...) +} + +// Create returns a builder for creating a Keys entity. +func (c *KeysClient) Create() *KeysCreate { + mutation := newKeysMutation(c.config, OpCreate) + return &KeysCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Keys entities. +func (c *KeysClient) CreateBulk(builders ...*KeysCreate) *KeysCreateBulk { + return &KeysCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Keys. +func (c *KeysClient) Update() *KeysUpdate { + mutation := newKeysMutation(c.config, OpUpdate) + return &KeysUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *KeysClient) UpdateOne(k *Keys) *KeysUpdateOne { + mutation := newKeysMutation(c.config, OpUpdateOne, withKeys(k)) + return &KeysUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *KeysClient) UpdateOneID(id string) *KeysUpdateOne { + mutation := newKeysMutation(c.config, OpUpdateOne, withKeysID(id)) + return &KeysUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Keys. +func (c *KeysClient) Delete() *KeysDelete { + mutation := newKeysMutation(c.config, OpDelete) + return &KeysDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *KeysClient) DeleteOne(k *Keys) *KeysDeleteOne { + return c.DeleteOneID(k.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *KeysClient) DeleteOneID(id string) *KeysDeleteOne { + builder := c.Delete().Where(keys.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &KeysDeleteOne{builder} +} + +// Query returns a query builder for Keys. +func (c *KeysClient) Query() *KeysQuery { + return &KeysQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeKeys}, + inters: c.Interceptors(), + } +} + +// Get returns a Keys entity by its id. +func (c *KeysClient) Get(ctx context.Context, id string) (*Keys, error) { + return c.Query().Where(keys.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *KeysClient) GetX(ctx context.Context, id string) *Keys { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *KeysClient) Hooks() []Hook { + return c.hooks.Keys +} + +// Interceptors returns the client interceptors. 
+func (c *KeysClient) Interceptors() []Interceptor { + return c.inters.Keys +} + +func (c *KeysClient) mutate(ctx context.Context, m *KeysMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&KeysCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&KeysUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&KeysUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&KeysDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown Keys mutation op: %q", m.Op()) + } +} + +// OAuth2ClientClient is a client for the OAuth2Client schema. +type OAuth2ClientClient struct { + config +} + +// NewOAuth2ClientClient returns a client for the OAuth2Client from the given config. +func NewOAuth2ClientClient(c config) *OAuth2ClientClient { + return &OAuth2ClientClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `oauth2client.Hooks(f(g(h())))`. +func (c *OAuth2ClientClient) Use(hooks ...Hook) { + c.hooks.OAuth2Client = append(c.hooks.OAuth2Client, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `oauth2client.Intercept(f(g(h())))`. +func (c *OAuth2ClientClient) Intercept(interceptors ...Interceptor) { + c.inters.OAuth2Client = append(c.inters.OAuth2Client, interceptors...) +} + +// Create returns a builder for creating a OAuth2Client entity. +func (c *OAuth2ClientClient) Create() *OAuth2ClientCreate { + mutation := newOAuth2ClientMutation(c.config, OpCreate) + return &OAuth2ClientCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of OAuth2Client entities. +func (c *OAuth2ClientClient) CreateBulk(builders ...*OAuth2ClientCreate) *OAuth2ClientCreateBulk { + return &OAuth2ClientCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for OAuth2Client. +func (c *OAuth2ClientClient) Update() *OAuth2ClientUpdate { + mutation := newOAuth2ClientMutation(c.config, OpUpdate) + return &OAuth2ClientUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *OAuth2ClientClient) UpdateOne(o *OAuth2Client) *OAuth2ClientUpdateOne { + mutation := newOAuth2ClientMutation(c.config, OpUpdateOne, withOAuth2Client(o)) + return &OAuth2ClientUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *OAuth2ClientClient) UpdateOneID(id string) *OAuth2ClientUpdateOne { + mutation := newOAuth2ClientMutation(c.config, OpUpdateOne, withOAuth2ClientID(id)) + return &OAuth2ClientUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for OAuth2Client. +func (c *OAuth2ClientClient) Delete() *OAuth2ClientDelete { + mutation := newOAuth2ClientMutation(c.config, OpDelete) + return &OAuth2ClientDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *OAuth2ClientClient) DeleteOne(o *OAuth2Client) *OAuth2ClientDeleteOne { + return c.DeleteOneID(o.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *OAuth2ClientClient) DeleteOneID(id string) *OAuth2ClientDeleteOne { + builder := c.Delete().Where(oauth2client.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &OAuth2ClientDeleteOne{builder} +} + +// Query returns a query builder for OAuth2Client. +func (c *OAuth2ClientClient) Query() *OAuth2ClientQuery { + return &OAuth2ClientQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeOAuth2Client}, + inters: c.Interceptors(), + } +} + +// Get returns a OAuth2Client entity by its id. +func (c *OAuth2ClientClient) Get(ctx context.Context, id string) (*OAuth2Client, error) { + return c.Query().Where(oauth2client.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *OAuth2ClientClient) GetX(ctx context.Context, id string) *OAuth2Client { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *OAuth2ClientClient) Hooks() []Hook { + return c.hooks.OAuth2Client +} + +// Interceptors returns the client interceptors. +func (c *OAuth2ClientClient) Interceptors() []Interceptor { + return c.inters.OAuth2Client +} + +func (c *OAuth2ClientClient) mutate(ctx context.Context, m *OAuth2ClientMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&OAuth2ClientCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&OAuth2ClientUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&OAuth2ClientUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&OAuth2ClientDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown OAuth2Client mutation op: %q", m.Op()) + } +} + +// OfflineSessionClient is a client for the OfflineSession schema. +type OfflineSessionClient struct { + config +} + +// NewOfflineSessionClient returns a client for the OfflineSession from the given config. +func NewOfflineSessionClient(c config) *OfflineSessionClient { + return &OfflineSessionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `offlinesession.Hooks(f(g(h())))`. +func (c *OfflineSessionClient) Use(hooks ...Hook) { + c.hooks.OfflineSession = append(c.hooks.OfflineSession, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `offlinesession.Intercept(f(g(h())))`. +func (c *OfflineSessionClient) Intercept(interceptors ...Interceptor) { + c.inters.OfflineSession = append(c.inters.OfflineSession, interceptors...) +} + +// Create returns a builder for creating a OfflineSession entity. +func (c *OfflineSessionClient) Create() *OfflineSessionCreate { + mutation := newOfflineSessionMutation(c.config, OpCreate) + return &OfflineSessionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of OfflineSession entities. +func (c *OfflineSessionClient) CreateBulk(builders ...*OfflineSessionCreate) *OfflineSessionCreateBulk { + return &OfflineSessionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for OfflineSession. 
+func (c *OfflineSessionClient) Update() *OfflineSessionUpdate { + mutation := newOfflineSessionMutation(c.config, OpUpdate) + return &OfflineSessionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *OfflineSessionClient) UpdateOne(os *OfflineSession) *OfflineSessionUpdateOne { + mutation := newOfflineSessionMutation(c.config, OpUpdateOne, withOfflineSession(os)) + return &OfflineSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *OfflineSessionClient) UpdateOneID(id string) *OfflineSessionUpdateOne { + mutation := newOfflineSessionMutation(c.config, OpUpdateOne, withOfflineSessionID(id)) + return &OfflineSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for OfflineSession. +func (c *OfflineSessionClient) Delete() *OfflineSessionDelete { + mutation := newOfflineSessionMutation(c.config, OpDelete) + return &OfflineSessionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *OfflineSessionClient) DeleteOne(os *OfflineSession) *OfflineSessionDeleteOne { + return c.DeleteOneID(os.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *OfflineSessionClient) DeleteOneID(id string) *OfflineSessionDeleteOne { + builder := c.Delete().Where(offlinesession.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &OfflineSessionDeleteOne{builder} +} + +// Query returns a query builder for OfflineSession. +func (c *OfflineSessionClient) Query() *OfflineSessionQuery { + return &OfflineSessionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeOfflineSession}, + inters: c.Interceptors(), + } +} + +// Get returns a OfflineSession entity by its id. +func (c *OfflineSessionClient) Get(ctx context.Context, id string) (*OfflineSession, error) { + return c.Query().Where(offlinesession.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *OfflineSessionClient) GetX(ctx context.Context, id string) *OfflineSession { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *OfflineSessionClient) Hooks() []Hook { + return c.hooks.OfflineSession +} + +// Interceptors returns the client interceptors. +func (c *OfflineSessionClient) Interceptors() []Interceptor { + return c.inters.OfflineSession +} + +func (c *OfflineSessionClient) mutate(ctx context.Context, m *OfflineSessionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&OfflineSessionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&OfflineSessionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&OfflineSessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&OfflineSessionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown OfflineSession mutation op: %q", m.Op()) + } +} + +// PasswordClient is a client for the Password schema. +type PasswordClient struct { + config +} + +// NewPasswordClient returns a client for the Password from the given config. 
+func NewPasswordClient(c config) *PasswordClient { + return &PasswordClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `password.Hooks(f(g(h())))`. +func (c *PasswordClient) Use(hooks ...Hook) { + c.hooks.Password = append(c.hooks.Password, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `password.Intercept(f(g(h())))`. +func (c *PasswordClient) Intercept(interceptors ...Interceptor) { + c.inters.Password = append(c.inters.Password, interceptors...) +} + +// Create returns a builder for creating a Password entity. +func (c *PasswordClient) Create() *PasswordCreate { + mutation := newPasswordMutation(c.config, OpCreate) + return &PasswordCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Password entities. +func (c *PasswordClient) CreateBulk(builders ...*PasswordCreate) *PasswordCreateBulk { + return &PasswordCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Password. +func (c *PasswordClient) Update() *PasswordUpdate { + mutation := newPasswordMutation(c.config, OpUpdate) + return &PasswordUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PasswordClient) UpdateOne(pa *Password) *PasswordUpdateOne { + mutation := newPasswordMutation(c.config, OpUpdateOne, withPassword(pa)) + return &PasswordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PasswordClient) UpdateOneID(id int) *PasswordUpdateOne { + mutation := newPasswordMutation(c.config, OpUpdateOne, withPasswordID(id)) + return &PasswordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Password. +func (c *PasswordClient) Delete() *PasswordDelete { + mutation := newPasswordMutation(c.config, OpDelete) + return &PasswordDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PasswordClient) DeleteOne(pa *Password) *PasswordDeleteOne { + return c.DeleteOneID(pa.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PasswordClient) DeleteOneID(id int) *PasswordDeleteOne { + builder := c.Delete().Where(password.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PasswordDeleteOne{builder} +} + +// Query returns a query builder for Password. +func (c *PasswordClient) Query() *PasswordQuery { + return &PasswordQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePassword}, + inters: c.Interceptors(), + } +} + +// Get returns a Password entity by its id. +func (c *PasswordClient) Get(ctx context.Context, id int) (*Password, error) { + return c.Query().Where(password.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PasswordClient) GetX(ctx context.Context, id int) *Password { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *PasswordClient) Hooks() []Hook { + return c.hooks.Password +} + +// Interceptors returns the client interceptors. 
+func (c *PasswordClient) Interceptors() []Interceptor { + return c.inters.Password +} + +func (c *PasswordClient) mutate(ctx context.Context, m *PasswordMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PasswordCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PasswordUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PasswordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PasswordDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown Password mutation op: %q", m.Op()) + } +} + +// RefreshTokenClient is a client for the RefreshToken schema. +type RefreshTokenClient struct { + config +} + +// NewRefreshTokenClient returns a client for the RefreshToken from the given config. +func NewRefreshTokenClient(c config) *RefreshTokenClient { + return &RefreshTokenClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `refreshtoken.Hooks(f(g(h())))`. +func (c *RefreshTokenClient) Use(hooks ...Hook) { + c.hooks.RefreshToken = append(c.hooks.RefreshToken, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `refreshtoken.Intercept(f(g(h())))`. +func (c *RefreshTokenClient) Intercept(interceptors ...Interceptor) { + c.inters.RefreshToken = append(c.inters.RefreshToken, interceptors...) +} + +// Create returns a builder for creating a RefreshToken entity. +func (c *RefreshTokenClient) Create() *RefreshTokenCreate { + mutation := newRefreshTokenMutation(c.config, OpCreate) + return &RefreshTokenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of RefreshToken entities. +func (c *RefreshTokenClient) CreateBulk(builders ...*RefreshTokenCreate) *RefreshTokenCreateBulk { + return &RefreshTokenCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for RefreshToken. +func (c *RefreshTokenClient) Update() *RefreshTokenUpdate { + mutation := newRefreshTokenMutation(c.config, OpUpdate) + return &RefreshTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *RefreshTokenClient) UpdateOne(rt *RefreshToken) *RefreshTokenUpdateOne { + mutation := newRefreshTokenMutation(c.config, OpUpdateOne, withRefreshToken(rt)) + return &RefreshTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *RefreshTokenClient) UpdateOneID(id string) *RefreshTokenUpdateOne { + mutation := newRefreshTokenMutation(c.config, OpUpdateOne, withRefreshTokenID(id)) + return &RefreshTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for RefreshToken. +func (c *RefreshTokenClient) Delete() *RefreshTokenDelete { + mutation := newRefreshTokenMutation(c.config, OpDelete) + return &RefreshTokenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *RefreshTokenClient) DeleteOne(rt *RefreshToken) *RefreshTokenDeleteOne { + return c.DeleteOneID(rt.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *RefreshTokenClient) DeleteOneID(id string) *RefreshTokenDeleteOne { + builder := c.Delete().Where(refreshtoken.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &RefreshTokenDeleteOne{builder} +} + +// Query returns a query builder for RefreshToken. +func (c *RefreshTokenClient) Query() *RefreshTokenQuery { + return &RefreshTokenQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeRefreshToken}, + inters: c.Interceptors(), + } +} + +// Get returns a RefreshToken entity by its id. +func (c *RefreshTokenClient) Get(ctx context.Context, id string) (*RefreshToken, error) { + return c.Query().Where(refreshtoken.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *RefreshTokenClient) GetX(ctx context.Context, id string) *RefreshToken { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *RefreshTokenClient) Hooks() []Hook { + return c.hooks.RefreshToken +} + +// Interceptors returns the client interceptors. +func (c *RefreshTokenClient) Interceptors() []Interceptor { + return c.inters.RefreshToken +} + +func (c *RefreshTokenClient) mutate(ctx context.Context, m *RefreshTokenMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&RefreshTokenCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&RefreshTokenUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&RefreshTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&RefreshTokenDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown RefreshToken mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys, + OAuth2Client, OfflineSession, Password, RefreshToken []ent.Hook + } + inters struct { + AuthCode, AuthRequest, Connector, DeviceRequest, DeviceToken, Keys, + OAuth2Client, OfflineSession, Password, RefreshToken []ent.Interceptor + } +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector.go new file mode 100644 index 00000000..34c88e31 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector.go @@ -0,0 +1,136 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/connector" +) + +// Connector is the model entity for the Connector schema. +type Connector struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // ResourceVersion holds the value of the "resource_version" field. + ResourceVersion string `json:"resource_version,omitempty"` + // Config holds the value of the "config" field. + Config []byte `json:"config,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Connector) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case connector.FieldConfig: + values[i] = new([]byte) + case connector.FieldID, connector.FieldType, connector.FieldName, connector.FieldResourceVersion: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Connector fields. +func (c *Connector) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case connector.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + c.ID = value.String + } + case connector.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + c.Type = value.String + } + case connector.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + c.Name = value.String + } + case connector.FieldResourceVersion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field resource_version", values[i]) + } else if value.Valid { + c.ResourceVersion = value.String + } + case connector.FieldConfig: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field config", values[i]) + } else if value != nil { + c.Config = *value + } + default: + c.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Connector. +// This includes values selected through modifiers, order, etc. +func (c *Connector) Value(name string) (ent.Value, error) { + return c.selectValues.Get(name) +} + +// Update returns a builder for updating this Connector. +// Note that you need to call Connector.Unwrap() before calling this method if this Connector +// was returned from a transaction, and the transaction was committed or rolled back. +func (c *Connector) Update() *ConnectorUpdateOne { + return NewConnectorClient(c.config).UpdateOne(c) +} + +// Unwrap unwraps the Connector entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (c *Connector) Unwrap() *Connector { + _tx, ok := c.config.driver.(*txDriver) + if !ok { + panic("db: Connector is not a transactional entity") + } + c.config.driver = _tx.drv + return c +} + +// String implements the fmt.Stringer. 
+func (c *Connector) String() string { + var builder strings.Builder + builder.WriteString("Connector(") + builder.WriteString(fmt.Sprintf("id=%v, ", c.ID)) + builder.WriteString("type=") + builder.WriteString(c.Type) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(c.Name) + builder.WriteString(", ") + builder.WriteString("resource_version=") + builder.WriteString(c.ResourceVersion) + builder.WriteString(", ") + builder.WriteString("config=") + builder.WriteString(fmt.Sprintf("%v", c.Config)) + builder.WriteByte(')') + return builder.String() +} + +// Connectors is a parsable slice of Connector. +type Connectors []*Connector diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/connector/BUILD new file mode 100644 index 00000000..da9020cd --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "connector", + srcs = [ + "connector.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/connector", + importpath = "github.com/dexidp/dex/storage/ent/db/connector", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector/connector.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector/connector.go new file mode 100644 index 00000000..996328c1 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector/connector.go @@ -0,0 +1,75 @@ +// Code generated by ent, DO NOT EDIT. + +package connector + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the connector type in the database. + Label = "connector" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldResourceVersion holds the string denoting the resource_version field in the database. + FieldResourceVersion = "resource_version" + // FieldConfig holds the string denoting the config field in the database. + FieldConfig = "config" + // Table holds the table name of the connector in the database. + Table = "connectors" +) + +// Columns holds all SQL columns for connector fields. +var Columns = []string{ + FieldID, + FieldType, + FieldName, + FieldResourceVersion, + FieldConfig, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the Connector queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
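+// Order options are passed to a query's Order method. A minimal,
+// illustrative sketch (assumes an initialized *Client named client):
+//
+//	client.Connector.Query().
+//		Order(connector.ByName(sql.OrderDesc())).
+//		AllX(ctx)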
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByResourceVersion orders the results by the resource_version field. +func ByResourceVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResourceVersion, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector/where.go new file mode 100644 index 00000000..f2efee7a --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector/where.go @@ -0,0 +1,350 @@ +// Code generated by ent, DO NOT EDIT. + +package connector + +import ( + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Connector { + return predicate.Connector(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Connector { + return predicate.Connector(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Connector { + return predicate.Connector(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Connector { + return predicate.Connector(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Connector { + return predicate.Connector(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Connector { + return predicate.Connector(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Connector { + return predicate.Connector(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Connector { + return predicate.Connector(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Connector { + return predicate.Connector(sql.FieldContainsFold(FieldID, id)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldType, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldName, v)) +} + +// ResourceVersion applies equality check predicate on the "resource_version" field. It's identical to ResourceVersionEQ. 
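+// Field predicates like this one are combined in a query's Where call;
+// an illustrative sketch (client is an assumed, initialized *Client):
+//
+//	conns, err := client.Connector.Query().
+//		Where(connector.ResourceVersion("1")).
+//		All(ctx)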
+func ResourceVersion(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldResourceVersion, v)) +} + +// Config applies equality check predicate on the "config" field. It's identical to ConfigEQ. +func Config(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldConfig, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Connector { + return predicate.Connector(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Connector { + return predicate.Connector(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Connector { + return predicate.Connector(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldContainsFold(FieldType, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Connector { + return predicate.Connector(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. 
+func NameGTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Connector { + return predicate.Connector(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Connector { + return predicate.Connector(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldContainsFold(FieldName, v)) +} + +// ResourceVersionEQ applies the EQ predicate on the "resource_version" field. +func ResourceVersionEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldResourceVersion, v)) +} + +// ResourceVersionNEQ applies the NEQ predicate on the "resource_version" field. +func ResourceVersionNEQ(v string) predicate.Connector { + return predicate.Connector(sql.FieldNEQ(FieldResourceVersion, v)) +} + +// ResourceVersionIn applies the In predicate on the "resource_version" field. +func ResourceVersionIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldIn(FieldResourceVersion, vs...)) +} + +// ResourceVersionNotIn applies the NotIn predicate on the "resource_version" field. +func ResourceVersionNotIn(vs ...string) predicate.Connector { + return predicate.Connector(sql.FieldNotIn(FieldResourceVersion, vs...)) +} + +// ResourceVersionGT applies the GT predicate on the "resource_version" field. +func ResourceVersionGT(v string) predicate.Connector { + return predicate.Connector(sql.FieldGT(FieldResourceVersion, v)) +} + +// ResourceVersionGTE applies the GTE predicate on the "resource_version" field. +func ResourceVersionGTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldGTE(FieldResourceVersion, v)) +} + +// ResourceVersionLT applies the LT predicate on the "resource_version" field. +func ResourceVersionLT(v string) predicate.Connector { + return predicate.Connector(sql.FieldLT(FieldResourceVersion, v)) +} + +// ResourceVersionLTE applies the LTE predicate on the "resource_version" field. +func ResourceVersionLTE(v string) predicate.Connector { + return predicate.Connector(sql.FieldLTE(FieldResourceVersion, v)) +} + +// ResourceVersionContains applies the Contains predicate on the "resource_version" field. +func ResourceVersionContains(v string) predicate.Connector { + return predicate.Connector(sql.FieldContains(FieldResourceVersion, v)) +} + +// ResourceVersionHasPrefix applies the HasPrefix predicate on the "resource_version" field. 
+func ResourceVersionHasPrefix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasPrefix(FieldResourceVersion, v)) +} + +// ResourceVersionHasSuffix applies the HasSuffix predicate on the "resource_version" field. +func ResourceVersionHasSuffix(v string) predicate.Connector { + return predicate.Connector(sql.FieldHasSuffix(FieldResourceVersion, v)) +} + +// ResourceVersionEqualFold applies the EqualFold predicate on the "resource_version" field. +func ResourceVersionEqualFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldEqualFold(FieldResourceVersion, v)) +} + +// ResourceVersionContainsFold applies the ContainsFold predicate on the "resource_version" field. +func ResourceVersionContainsFold(v string) predicate.Connector { + return predicate.Connector(sql.FieldContainsFold(FieldResourceVersion, v)) +} + +// ConfigEQ applies the EQ predicate on the "config" field. +func ConfigEQ(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldEQ(FieldConfig, v)) +} + +// ConfigNEQ applies the NEQ predicate on the "config" field. +func ConfigNEQ(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldNEQ(FieldConfig, v)) +} + +// ConfigIn applies the In predicate on the "config" field. +func ConfigIn(vs ...[]byte) predicate.Connector { + return predicate.Connector(sql.FieldIn(FieldConfig, vs...)) +} + +// ConfigNotIn applies the NotIn predicate on the "config" field. +func ConfigNotIn(vs ...[]byte) predicate.Connector { + return predicate.Connector(sql.FieldNotIn(FieldConfig, vs...)) +} + +// ConfigGT applies the GT predicate on the "config" field. +func ConfigGT(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldGT(FieldConfig, v)) +} + +// ConfigGTE applies the GTE predicate on the "config" field. +func ConfigGTE(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldGTE(FieldConfig, v)) +} + +// ConfigLT applies the LT predicate on the "config" field. +func ConfigLT(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldLT(FieldConfig, v)) +} + +// ConfigLTE applies the LTE predicate on the "config" field. +func ConfigLTE(v []byte) predicate.Connector { + return predicate.Connector(sql.FieldLTE(FieldConfig, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Connector) predicate.Connector { + return predicate.Connector(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Connector) predicate.Connector { + return predicate.Connector(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Connector) predicate.Connector { + return predicate.Connector(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector_create.go new file mode 100644 index 00000000..fda13214 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector_create.go @@ -0,0 +1,244 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/connector" +) + +// ConnectorCreate is the builder for creating a Connector entity. +type ConnectorCreate struct { + config + mutation *ConnectorMutation + hooks []Hook +} + +// SetType sets the "type" field. +func (cc *ConnectorCreate) SetType(s string) *ConnectorCreate { + cc.mutation.SetType(s) + return cc +} + +// SetName sets the "name" field. +func (cc *ConnectorCreate) SetName(s string) *ConnectorCreate { + cc.mutation.SetName(s) + return cc +} + +// SetResourceVersion sets the "resource_version" field. +func (cc *ConnectorCreate) SetResourceVersion(s string) *ConnectorCreate { + cc.mutation.SetResourceVersion(s) + return cc +} + +// SetConfig sets the "config" field. +func (cc *ConnectorCreate) SetConfig(b []byte) *ConnectorCreate { + cc.mutation.SetConfig(b) + return cc +} + +// SetID sets the "id" field. +func (cc *ConnectorCreate) SetID(s string) *ConnectorCreate { + cc.mutation.SetID(s) + return cc +} + +// Mutation returns the ConnectorMutation object of the builder. +func (cc *ConnectorCreate) Mutation() *ConnectorMutation { + return cc.mutation +} + +// Save creates the Connector in the database. +func (cc *ConnectorCreate) Save(ctx context.Context) (*Connector, error) { + return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (cc *ConnectorCreate) SaveX(ctx context.Context) *Connector { + v, err := cc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cc *ConnectorCreate) Exec(ctx context.Context) error { + _, err := cc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cc *ConnectorCreate) ExecX(ctx context.Context) { + if err := cc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
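+// For instance, a create that never sets the required "type" field fails
+// here, before any SQL is issued (illustrative; client is an assumed,
+// initialized *Client):
+//
+//	_, err := client.Connector.Create().SetID("local").Save(ctx)
+//	// err mentions: missing required field "Connector.type"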
+func (cc *ConnectorCreate) check() error { + if _, ok := cc.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`db: missing required field "Connector.type"`)} + } + if v, ok := cc.mutation.GetType(); ok { + if err := connector.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`db: validator failed for field "Connector.type": %w`, err)} + } + } + if _, ok := cc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`db: missing required field "Connector.name"`)} + } + if v, ok := cc.mutation.Name(); ok { + if err := connector.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "Connector.name": %w`, err)} + } + } + if _, ok := cc.mutation.ResourceVersion(); !ok { + return &ValidationError{Name: "resource_version", err: errors.New(`db: missing required field "Connector.resource_version"`)} + } + if _, ok := cc.mutation.Config(); !ok { + return &ValidationError{Name: "config", err: errors.New(`db: missing required field "Connector.config"`)} + } + if v, ok := cc.mutation.ID(); ok { + if err := connector.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "Connector.id": %w`, err)} + } + } + return nil +} + +func (cc *ConnectorCreate) sqlSave(ctx context.Context) (*Connector, error) { + if err := cc.check(); err != nil { + return nil, err + } + _node, _spec := cc.createSpec() + if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Connector.ID type: %T", _spec.ID.Value) + } + } + cc.mutation.id = &_node.ID + cc.mutation.done = true + return _node, nil +} + +func (cc *ConnectorCreate) createSpec() (*Connector, *sqlgraph.CreateSpec) { + var ( + _node = &Connector{config: cc.config} + _spec = sqlgraph.NewCreateSpec(connector.Table, sqlgraph.NewFieldSpec(connector.FieldID, field.TypeString)) + ) + if id, ok := cc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := cc.mutation.GetType(); ok { + _spec.SetField(connector.FieldType, field.TypeString, value) + _node.Type = value + } + if value, ok := cc.mutation.Name(); ok { + _spec.SetField(connector.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := cc.mutation.ResourceVersion(); ok { + _spec.SetField(connector.FieldResourceVersion, field.TypeString, value) + _node.ResourceVersion = value + } + if value, ok := cc.mutation.Config(); ok { + _spec.SetField(connector.FieldConfig, field.TypeBytes, value) + _node.Config = value + } + return _node, _spec +} + +// ConnectorCreateBulk is the builder for creating many Connector entities in bulk. +type ConnectorCreateBulk struct { + config + builders []*ConnectorCreate +} + +// Save creates the Connector entities in the database. 
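+// An illustrative sketch (client is an assumed, initialized *Client; the
+// field values are placeholders):
+//
+//	conns, err := client.Connector.CreateBulk(
+//		client.Connector.Create().SetID("c1").SetType("oidc").
+//			SetName("A").SetResourceVersion("1").SetConfig([]byte("{}")),
+//		client.Connector.Create().SetID("c2").SetType("ldap").
+//			SetName("B").SetResourceVersion("1").SetConfig([]byte("{}")),
+//	).Save(ctx)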
+func (ccb *ConnectorCreateBulk) Save(ctx context.Context) ([]*Connector, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(ccb.builders))
+	nodes := make([]*Connector, len(ccb.builders))
+	mutators := make([]Mutator, len(ccb.builders))
+	for i := range ccb.builders {
+		func(i int, root context.Context) {
+			builder := ccb.builders[i]
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*ConnectorMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, ccb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ccb *ConnectorCreateBulk) SaveX(ctx context.Context) []*Connector {
+	v, err := ccb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (ccb *ConnectorCreateBulk) Exec(ctx context.Context) error {
+	_, err := ccb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ccb *ConnectorCreateBulk) ExecX(ctx context.Context) {
+	if err := ccb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector_delete.go
new file mode 100644
index 00000000..f7f3ed1e
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// ConnectorDelete is the builder for deleting a Connector entity.
+type ConnectorDelete struct {
+	config
+	hooks    []Hook
+	mutation *ConnectorMutation
+}
+
+// Where appends a list of predicates to the ConnectorDelete builder.
+func (cd *ConnectorDelete) Where(ps ...predicate.Connector) *ConnectorDelete {
+	cd.mutation.Where(ps...)
+	return cd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (cd *ConnectorDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
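+// Typical use deletes by predicate and returns the number of deleted rows;
+// an illustrative sketch (client is an assumed, initialized *Client):
+//
+//	n := client.Connector.Delete().
+//		Where(connector.TypeEQ("ldap")).
+//		ExecX(ctx)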
+func (cd *ConnectorDelete) ExecX(ctx context.Context) int {
+	n, err := cd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (cd *ConnectorDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(connector.Table, sqlgraph.NewFieldSpec(connector.FieldID, field.TypeString))
+	if ps := cd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	cd.mutation.done = true
+	return affected, err
+}
+
+// ConnectorDeleteOne is the builder for deleting a single Connector entity.
+type ConnectorDeleteOne struct {
+	cd *ConnectorDelete
+}
+
+// Where appends a list of predicates to the underlying ConnectorDelete builder.
+func (cdo *ConnectorDeleteOne) Where(ps ...predicate.Connector) *ConnectorDeleteOne {
+	cdo.cd.mutation.Where(ps...)
+	return cdo
+}
+
+// Exec executes the deletion query.
+func (cdo *ConnectorDeleteOne) Exec(ctx context.Context) error {
+	n, err := cdo.cd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{connector.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cdo *ConnectorDeleteOne) ExecX(ctx context.Context) {
+	if err := cdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector_query.go
new file mode 100644
index 00000000..990af2af
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector_query.go
@@ -0,0 +1,526 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// ConnectorQuery is the builder for querying Connector entities.
+type ConnectorQuery struct {
+	config
+	ctx        *QueryContext
+	order      []connector.OrderOption
+	inters     []Interceptor
+	predicates []predicate.Connector
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the ConnectorQuery builder.
+func (cq *ConnectorQuery) Where(ps ...predicate.Connector) *ConnectorQuery {
+	cq.predicates = append(cq.predicates, ps...)
+	return cq
+}
+
+// Limit the number of records to be returned by this query.
+func (cq *ConnectorQuery) Limit(limit int) *ConnectorQuery {
+	cq.ctx.Limit = &limit
+	return cq
+}
+
+// Offset to start from.
+func (cq *ConnectorQuery) Offset(offset int) *ConnectorQuery {
+	cq.ctx.Offset = &offset
+	return cq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (cq *ConnectorQuery) Unique(unique bool) *ConnectorQuery {
+	cq.ctx.Unique = &unique
+	return cq
+}
+
+// Order specifies how the records should be ordered.
+func (cq *ConnectorQuery) Order(o ...connector.OrderOption) *ConnectorQuery {
+	cq.order = append(cq.order, o...)
+	return cq
+}
+
+// First returns the first Connector entity from the query.
+// Returns a *NotFoundError when no Connector was found.
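+// An illustrative not-found check (client is an assumed, initialized *Client):
+//
+//	c, err := client.Connector.Query().First(ctx)
+//	if db.IsNotFound(err) {
+//		// no connector stored yet
+//	}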
+func (cq *ConnectorQuery) First(ctx context.Context) (*Connector, error) { + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{connector.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cq *ConnectorQuery) FirstX(ctx context.Context) *Connector { + node, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Connector ID from the query. +// Returns a *NotFoundError when no Connector ID was found. +func (cq *ConnectorQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{connector.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cq *ConnectorQuery) FirstIDX(ctx context.Context) string { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Connector entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Connector entity is found. +// Returns a *NotFoundError when no Connector entities are found. +func (cq *ConnectorQuery) Only(ctx context.Context) (*Connector, error) { + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{connector.Label} + default: + return nil, &NotSingularError{connector.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *ConnectorQuery) OnlyX(ctx context.Context) *Connector { + node, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Connector ID in the query. +// Returns a *NotSingularError when more than one Connector ID is found. +// Returns a *NotFoundError when no entities are found. +func (cq *ConnectorQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{connector.Label} + default: + err = &NotSingularError{connector.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cq *ConnectorQuery) OnlyIDX(ctx context.Context) string { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Connectors. +func (cq *ConnectorQuery) All(ctx context.Context) ([]*Connector, error) { + ctx = setContextOp(ctx, cq.ctx, "All") + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Connector, *ConnectorQuery]() + return withInterceptors[[]*Connector](ctx, cq, qr, cq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (cq *ConnectorQuery) AllX(ctx context.Context) []*Connector { + nodes, err := cq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Connector IDs. 
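+// An illustrative sketch (client is an assumed, initialized *Client):
+//
+//	ids, err := client.Connector.Query().
+//		Where(connector.TypeNEQ("mock")).
+//		IDs(ctx)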
+func (cq *ConnectorQuery) IDs(ctx context.Context) (ids []string, err error) {
+	if cq.ctx.Unique == nil && cq.path != nil {
+		cq.Unique(true)
+	}
+	ctx = setContextOp(ctx, cq.ctx, "IDs")
+	if err = cq.Select(connector.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (cq *ConnectorQuery) IDsX(ctx context.Context) []string {
+	ids, err := cq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (cq *ConnectorQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, cq.ctx, "Count")
+	if err := cq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, cq, querierCount[*ConnectorQuery](), cq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (cq *ConnectorQuery) CountX(ctx context.Context) int {
+	count, err := cq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (cq *ConnectorQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, cq.ctx, "Exist")
+	switch _, err := cq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("db: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (cq *ConnectorQuery) ExistX(ctx context.Context) bool {
+	exist, err := cq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the ConnectorQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (cq *ConnectorQuery) Clone() *ConnectorQuery {
+	if cq == nil {
+		return nil
+	}
+	return &ConnectorQuery{
+		config:     cq.config,
+		ctx:        cq.ctx.Clone(),
+		order:      append([]connector.OrderOption{}, cq.order...),
+		inters:     append([]Interceptor{}, cq.inters...),
+		predicates: append([]predicate.Connector{}, cq.predicates...),
+		// clone intermediate query.
+		sql:  cq.sql.Clone(),
+		path: cq.path,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		Type  string `json:"type,omitempty"`
+//		Count int    `json:"count,omitempty"`
+//	}
+//
+//	client.Connector.Query().
+//		GroupBy(connector.FieldType).
+//		Aggregate(db.Count()).
+//		Scan(ctx, &v)
+func (cq *ConnectorQuery) GroupBy(field string, fields ...string) *ConnectorGroupBy {
+	cq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &ConnectorGroupBy{build: cq}
+	grbuild.flds = &cq.ctx.Fields
+	grbuild.label = connector.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		Type string `json:"type,omitempty"`
+//	}
+//
+//	client.Connector.Query().
+//		Select(connector.FieldType).
+//		Scan(ctx, &v)
+func (cq *ConnectorQuery) Select(fields ...string) *ConnectorSelect {
+	cq.ctx.Fields = append(cq.ctx.Fields, fields...)
+ sbuild := &ConnectorSelect{ConnectorQuery: cq} + sbuild.label = connector.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ConnectorSelect configured with the given aggregations. +func (cq *ConnectorQuery) Aggregate(fns ...AggregateFunc) *ConnectorSelect { + return cq.Select().Aggregate(fns...) +} + +func (cq *ConnectorQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { + if !connector.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if cq.path != nil { + prev, err := cq.path(ctx) + if err != nil { + return err + } + cq.sql = prev + } + return nil +} + +func (cq *ConnectorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Connector, error) { + var ( + nodes = []*Connector{} + _spec = cq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Connector).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Connector{config: cq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (cq *ConnectorQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cq.querySpec() + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cq.driver, _spec) +} + +func (cq *ConnectorQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(connector.Table, connector.Columns, sqlgraph.NewFieldSpec(connector.FieldID, field.TypeString)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true + } + if fields := cq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, connector.FieldID) + for i := range fields { + if fields[i] != connector.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := cq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cq *ConnectorQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cq.driver.Dialect()) + t1 := builder.Table(connector.Table) + columns := cq.ctx.Fields + if len(columns) == 0 { + columns = connector.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if cq.ctx.Unique != nil && *cq.ctx.Unique { + selector.Distinct() + } + for _, p := range cq.predicates { + p(selector) + } + for _, p := range cq.order { + p(selector) + } + if offset := cq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ConnectorGroupBy is the group-by builder for Connector entities. +type ConnectorGroupBy struct { + selector + build *ConnectorQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *ConnectorGroupBy) Aggregate(fns ...AggregateFunc) *ConnectorGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb +} + +// Scan applies the selector query and scans the result into the given value. +func (cgb *ConnectorGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, "GroupBy") + if err := cgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ConnectorQuery, *ConnectorGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) +} + +func (cgb *ConnectorGroupBy) sqlScan(ctx context.Context, root *ConnectorQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ConnectorSelect is the builder for selecting fields of Connector entities. +type ConnectorSelect struct { + *ConnectorQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *ConnectorSelect) Aggregate(fns ...AggregateFunc) *ConnectorSelect { + cs.fns = append(cs.fns, fns...) + return cs +} + +// Scan applies the selector query and scans the result into the given value. +func (cs *ConnectorSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, "Select") + if err := cs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ConnectorQuery, *ConnectorSelect](ctx, cs.ConnectorQuery, cs, cs.inters, v) +} + +func (cs *ConnectorSelect) sqlScan(ctx context.Context, root *ConnectorQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := cs.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/connector_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/connector_update.go
new file mode 100644
index 00000000..035fe9c1
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/connector_update.go
@@ -0,0 +1,283 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// ConnectorUpdate is the builder for updating Connector entities.
+type ConnectorUpdate struct {
+	config
+	hooks    []Hook
+	mutation *ConnectorMutation
+}
+
+// Where appends a list of predicates to the ConnectorUpdate builder.
+func (cu *ConnectorUpdate) Where(ps ...predicate.Connector) *ConnectorUpdate {
+	cu.mutation.Where(ps...)
+	return cu
+}
+
+// SetType sets the "type" field.
+func (cu *ConnectorUpdate) SetType(s string) *ConnectorUpdate {
+	cu.mutation.SetType(s)
+	return cu
+}
+
+// SetName sets the "name" field.
+func (cu *ConnectorUpdate) SetName(s string) *ConnectorUpdate {
+	cu.mutation.SetName(s)
+	return cu
+}
+
+// SetResourceVersion sets the "resource_version" field.
+func (cu *ConnectorUpdate) SetResourceVersion(s string) *ConnectorUpdate {
+	cu.mutation.SetResourceVersion(s)
+	return cu
+}
+
+// SetConfig sets the "config" field.
+func (cu *ConnectorUpdate) SetConfig(b []byte) *ConnectorUpdate {
+	cu.mutation.SetConfig(b)
+	return cu
+}
+
+// Mutation returns the ConnectorMutation object of the builder.
+func (cu *ConnectorUpdate) Mutation() *ConnectorMutation {
+	return cu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (cu *ConnectorUpdate) Save(ctx context.Context) (int, error) {
+	return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (cu *ConnectorUpdate) SaveX(ctx context.Context) int {
+	affected, err := cu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (cu *ConnectorUpdate) Exec(ctx context.Context) error {
+	_, err := cu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cu *ConnectorUpdate) ExecX(ctx context.Context) {
+	if err := cu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
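+// Only fields that were explicitly set on the builder are validated. An
+// illustrative sketch (assumes the schema's NameValidator rejects the
+// empty string; client is an assumed, initialized *Client):
+//
+//	err := client.Connector.Update().SetName("").Exec(ctx)
+//	// err mentions: validator failed for field "Connector.name"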
+func (cu *ConnectorUpdate) check() error {
+	if v, ok := cu.mutation.GetType(); ok {
+		if err := connector.TypeValidator(v); err != nil {
+			return &ValidationError{Name: "type", err: fmt.Errorf(`db: validator failed for field "Connector.type": %w`, err)}
+		}
+	}
+	if v, ok := cu.mutation.Name(); ok {
+		if err := connector.NameValidator(v); err != nil {
+			return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "Connector.name": %w`, err)}
+		}
+	}
+	return nil
+}
+
+func (cu *ConnectorUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	if err := cu.check(); err != nil {
+		return n, err
+	}
+	_spec := sqlgraph.NewUpdateSpec(connector.Table, connector.Columns, sqlgraph.NewFieldSpec(connector.FieldID, field.TypeString))
+	if ps := cu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := cu.mutation.GetType(); ok {
+		_spec.SetField(connector.FieldType, field.TypeString, value)
+	}
+	if value, ok := cu.mutation.Name(); ok {
+		_spec.SetField(connector.FieldName, field.TypeString, value)
+	}
+	if value, ok := cu.mutation.ResourceVersion(); ok {
+		_spec.SetField(connector.FieldResourceVersion, field.TypeString, value)
+	}
+	if value, ok := cu.mutation.Config(); ok {
+		_spec.SetField(connector.FieldConfig, field.TypeBytes, value)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, cu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{connector.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	cu.mutation.done = true
+	return n, nil
+}
+
+// ConnectorUpdateOne is the builder for updating a single Connector entity.
+type ConnectorUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *ConnectorMutation
+}
+
+// SetType sets the "type" field.
+func (cuo *ConnectorUpdateOne) SetType(s string) *ConnectorUpdateOne {
+	cuo.mutation.SetType(s)
+	return cuo
+}
+
+// SetName sets the "name" field.
+func (cuo *ConnectorUpdateOne) SetName(s string) *ConnectorUpdateOne {
+	cuo.mutation.SetName(s)
+	return cuo
+}
+
+// SetResourceVersion sets the "resource_version" field.
+func (cuo *ConnectorUpdateOne) SetResourceVersion(s string) *ConnectorUpdateOne {
+	cuo.mutation.SetResourceVersion(s)
+	return cuo
+}
+
+// SetConfig sets the "config" field.
+func (cuo *ConnectorUpdateOne) SetConfig(b []byte) *ConnectorUpdateOne {
+	cuo.mutation.SetConfig(b)
+	return cuo
+}
+
+// Mutation returns the ConnectorMutation object of the builder.
+func (cuo *ConnectorUpdateOne) Mutation() *ConnectorMutation {
+	return cuo.mutation
+}
+
+// Where appends a list of predicates to the ConnectorUpdateOne builder.
+func (cuo *ConnectorUpdateOne) Where(ps ...predicate.Connector) *ConnectorUpdateOne {
+	cuo.mutation.Where(ps...)
+	return cuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (cuo *ConnectorUpdateOne) Select(field string, fields ...string) *ConnectorUpdateOne {
+	cuo.fields = append([]string{field}, fields...)
+	return cuo
+}
+
+// Save executes the query and returns the updated Connector entity.
+func (cuo *ConnectorUpdateOne) Save(ctx context.Context) (*Connector, error) {
+	return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
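+// An illustrative sketch (client and conn are assumed, pre-existing values):
+//
+//	conn = client.Connector.UpdateOne(conn).
+//		SetResourceVersion("2").
+//		SaveX(ctx)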
+func (cuo *ConnectorUpdateOne) SaveX(ctx context.Context) *Connector { + node, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cuo *ConnectorUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cuo *ConnectorUpdateOne) ExecX(ctx context.Context) { + if err := cuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cuo *ConnectorUpdateOne) check() error { + if v, ok := cuo.mutation.GetType(); ok { + if err := connector.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`db: validator failed for field "Connector.type": %w`, err)} + } + } + if v, ok := cuo.mutation.Name(); ok { + if err := connector.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "Connector.name": %w`, err)} + } + } + return nil +} + +func (cuo *ConnectorUpdateOne) sqlSave(ctx context.Context) (_node *Connector, err error) { + if err := cuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(connector.Table, connector.Columns, sqlgraph.NewFieldSpec(connector.FieldID, field.TypeString)) + id, ok := cuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "Connector.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, connector.FieldID) + for _, f := range fields { + if !connector.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != connector.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cuo.mutation.GetType(); ok { + _spec.SetField(connector.FieldType, field.TypeString, value) + } + if value, ok := cuo.mutation.Name(); ok { + _spec.SetField(connector.FieldName, field.TypeString, value) + } + if value, ok := cuo.mutation.ResourceVersion(); ok { + _spec.SetField(connector.FieldResourceVersion, field.TypeString, value) + } + if value, ok := cuo.mutation.Config(); ok { + _spec.SetField(connector.FieldConfig, field.TypeBytes, value) + } + _node = &Connector{config: cuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{connector.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cuo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest.go new file mode 100644 index 00000000..df0194bb --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest.go @@ -0,0 +1,166 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/devicerequest" +) + +// DeviceRequest is the model entity for the DeviceRequest schema. +type DeviceRequest struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // UserCode holds the value of the "user_code" field. + UserCode string `json:"user_code,omitempty"` + // DeviceCode holds the value of the "device_code" field. + DeviceCode string `json:"device_code,omitempty"` + // ClientID holds the value of the "client_id" field. + ClientID string `json:"client_id,omitempty"` + // ClientSecret holds the value of the "client_secret" field. + ClientSecret string `json:"client_secret,omitempty"` + // Scopes holds the value of the "scopes" field. + Scopes []string `json:"scopes,omitempty"` + // Expiry holds the value of the "expiry" field. + Expiry time.Time `json:"expiry,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*DeviceRequest) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case devicerequest.FieldScopes: + values[i] = new([]byte) + case devicerequest.FieldID: + values[i] = new(sql.NullInt64) + case devicerequest.FieldUserCode, devicerequest.FieldDeviceCode, devicerequest.FieldClientID, devicerequest.FieldClientSecret: + values[i] = new(sql.NullString) + case devicerequest.FieldExpiry: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DeviceRequest fields. 
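+// Note that the "scopes" column arrives as JSON-encoded bytes (see scanValues
+// above) and is unmarshaled into the []string field below.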
+func (dr *DeviceRequest) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case devicerequest.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", values[i])
+			}
+			dr.ID = int(value.Int64)
+		case devicerequest.FieldUserCode:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field user_code", values[i])
+			} else if value.Valid {
+				dr.UserCode = value.String
+			}
+		case devicerequest.FieldDeviceCode:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field device_code", values[i])
+			} else if value.Valid {
+				dr.DeviceCode = value.String
+			}
+		case devicerequest.FieldClientID:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field client_id", values[i])
+			} else if value.Valid {
+				dr.ClientID = value.String
+			}
+		case devicerequest.FieldClientSecret:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field client_secret", values[i])
+			} else if value.Valid {
+				dr.ClientSecret = value.String
+			}
+		case devicerequest.FieldScopes:
+			if value, ok := values[i].(*[]byte); !ok {
+				return fmt.Errorf("unexpected type %T for field scopes", values[i])
+			} else if value != nil && len(*value) > 0 {
+				if err := json.Unmarshal(*value, &dr.Scopes); err != nil {
+					return fmt.Errorf("unmarshal field scopes: %w", err)
+				}
+			}
+		case devicerequest.FieldExpiry:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field expiry", values[i])
+			} else if value.Valid {
+				dr.Expiry = value.Time
+			}
+		default:
+			dr.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the DeviceRequest.
+// This includes values selected through modifiers, order, etc.
+func (dr *DeviceRequest) Value(name string) (ent.Value, error) {
+	return dr.selectValues.Get(name)
+}
+
+// Update returns a builder for updating this DeviceRequest.
+// Note that you need to call DeviceRequest.Unwrap() before calling this method if this DeviceRequest
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (dr *DeviceRequest) Update() *DeviceRequestUpdateOne {
+	return NewDeviceRequestClient(dr.config).UpdateOne(dr)
+}
+
+// Unwrap unwraps the DeviceRequest entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (dr *DeviceRequest) Unwrap() *DeviceRequest {
+	_tx, ok := dr.config.driver.(*txDriver)
+	if !ok {
+		panic("db: DeviceRequest is not a transactional entity")
+	}
+	dr.config.driver = _tx.drv
+	return dr
+}
+
+// String implements the fmt.Stringer.
+func (dr *DeviceRequest) String() string { + var builder strings.Builder + builder.WriteString("DeviceRequest(") + builder.WriteString(fmt.Sprintf("id=%v, ", dr.ID)) + builder.WriteString("user_code=") + builder.WriteString(dr.UserCode) + builder.WriteString(", ") + builder.WriteString("device_code=") + builder.WriteString(dr.DeviceCode) + builder.WriteString(", ") + builder.WriteString("client_id=") + builder.WriteString(dr.ClientID) + builder.WriteString(", ") + builder.WriteString("client_secret=") + builder.WriteString(dr.ClientSecret) + builder.WriteString(", ") + builder.WriteString("scopes=") + builder.WriteString(fmt.Sprintf("%v", dr.Scopes)) + builder.WriteString(", ") + builder.WriteString("expiry=") + builder.WriteString(dr.Expiry.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// DeviceRequests is a parsable slice of DeviceRequest. +type DeviceRequests []*DeviceRequest diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/BUILD new file mode 100644 index 00000000..0bba04b2 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "devicerequest", + srcs = [ + "devicerequest.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest", + importpath = "github.com/dexidp/dex/storage/ent/db/devicerequest", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/devicerequest.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/devicerequest.go new file mode 100644 index 00000000..d27f3971 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/devicerequest.go @@ -0,0 +1,93 @@ +// Code generated by ent, DO NOT EDIT. + +package devicerequest + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the devicerequest type in the database. + Label = "device_request" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldUserCode holds the string denoting the user_code field in the database. + FieldUserCode = "user_code" + // FieldDeviceCode holds the string denoting the device_code field in the database. + FieldDeviceCode = "device_code" + // FieldClientID holds the string denoting the client_id field in the database. + FieldClientID = "client_id" + // FieldClientSecret holds the string denoting the client_secret field in the database. + FieldClientSecret = "client_secret" + // FieldScopes holds the string denoting the scopes field in the database. + FieldScopes = "scopes" + // FieldExpiry holds the string denoting the expiry field in the database. + FieldExpiry = "expiry" + // Table holds the table name of the devicerequest in the database. + Table = "device_requests" +) + +// Columns holds all SQL columns for devicerequest fields. +var Columns = []string{ + FieldID, + FieldUserCode, + FieldDeviceCode, + FieldClientID, + FieldClientSecret, + FieldScopes, + FieldExpiry, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
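+// For example, ValidColumn(FieldUserCode) reports true, while
+// ValidColumn("no_such_column") reports false.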
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // UserCodeValidator is a validator for the "user_code" field. It is called by the builders before save. + UserCodeValidator func(string) error + // DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save. + DeviceCodeValidator func(string) error + // ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save. + ClientIDValidator func(string) error + // ClientSecretValidator is a validator for the "client_secret" field. It is called by the builders before save. + ClientSecretValidator func(string) error +) + +// OrderOption defines the ordering options for the DeviceRequest queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByUserCode orders the results by the user_code field. +func ByUserCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserCode, opts...).ToFunc() +} + +// ByDeviceCode orders the results by the device_code field. +func ByDeviceCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeviceCode, opts...).ToFunc() +} + +// ByClientID orders the results by the client_id field. +func ByClientID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientID, opts...).ToFunc() +} + +// ByClientSecret orders the results by the client_secret field. +func ByClientSecret(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientSecret, opts...).ToFunc() +} + +// ByExpiry orders the results by the expiry field. +func ByExpiry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiry, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/where.go new file mode 100644 index 00000000..63400e24 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest/where.go @@ -0,0 +1,422 @@ +// Code generated by ent, DO NOT EDIT. + +package devicerequest + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
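+// Like the other ID predicates, it is passed to Where on a query builder,
+// e.g. (client being the generated *db.Client):
+//
+//	client.DeviceRequest.Query().Where(devicerequest.IDGTE(10))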
+func IDGTE(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldID, id)) +} + +// UserCode applies equality check predicate on the "user_code" field. It's identical to UserCodeEQ. +func UserCode(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldUserCode, v)) +} + +// DeviceCode applies equality check predicate on the "device_code" field. It's identical to DeviceCodeEQ. +func DeviceCode(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldDeviceCode, v)) +} + +// ClientID applies equality check predicate on the "client_id" field. It's identical to ClientIDEQ. +func ClientID(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldClientID, v)) +} + +// ClientSecret applies equality check predicate on the "client_secret" field. It's identical to ClientSecretEQ. +func ClientSecret(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldClientSecret, v)) +} + +// Expiry applies equality check predicate on the "expiry" field. It's identical to ExpiryEQ. +func Expiry(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldExpiry, v)) +} + +// UserCodeEQ applies the EQ predicate on the "user_code" field. +func UserCodeEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldUserCode, v)) +} + +// UserCodeNEQ applies the NEQ predicate on the "user_code" field. +func UserCodeNEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldUserCode, v)) +} + +// UserCodeIn applies the In predicate on the "user_code" field. +func UserCodeIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldUserCode, vs...)) +} + +// UserCodeNotIn applies the NotIn predicate on the "user_code" field. +func UserCodeNotIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldUserCode, vs...)) +} + +// UserCodeGT applies the GT predicate on the "user_code" field. +func UserCodeGT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldUserCode, v)) +} + +// UserCodeGTE applies the GTE predicate on the "user_code" field. +func UserCodeGTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldUserCode, v)) +} + +// UserCodeLT applies the LT predicate on the "user_code" field. +func UserCodeLT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldUserCode, v)) +} + +// UserCodeLTE applies the LTE predicate on the "user_code" field. +func UserCodeLTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldUserCode, v)) +} + +// UserCodeContains applies the Contains predicate on the "user_code" field. +func UserCodeContains(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContains(FieldUserCode, v)) +} + +// UserCodeHasPrefix applies the HasPrefix predicate on the "user_code" field. 
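+// On the SQL dialects this typically compiles to user_code LIKE 'v%'.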
+func UserCodeHasPrefix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasPrefix(FieldUserCode, v)) +} + +// UserCodeHasSuffix applies the HasSuffix predicate on the "user_code" field. +func UserCodeHasSuffix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasSuffix(FieldUserCode, v)) +} + +// UserCodeEqualFold applies the EqualFold predicate on the "user_code" field. +func UserCodeEqualFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEqualFold(FieldUserCode, v)) +} + +// UserCodeContainsFold applies the ContainsFold predicate on the "user_code" field. +func UserCodeContainsFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContainsFold(FieldUserCode, v)) +} + +// DeviceCodeEQ applies the EQ predicate on the "device_code" field. +func DeviceCodeEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldDeviceCode, v)) +} + +// DeviceCodeNEQ applies the NEQ predicate on the "device_code" field. +func DeviceCodeNEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldDeviceCode, v)) +} + +// DeviceCodeIn applies the In predicate on the "device_code" field. +func DeviceCodeIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldDeviceCode, vs...)) +} + +// DeviceCodeNotIn applies the NotIn predicate on the "device_code" field. +func DeviceCodeNotIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldDeviceCode, vs...)) +} + +// DeviceCodeGT applies the GT predicate on the "device_code" field. +func DeviceCodeGT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldDeviceCode, v)) +} + +// DeviceCodeGTE applies the GTE predicate on the "device_code" field. +func DeviceCodeGTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldDeviceCode, v)) +} + +// DeviceCodeLT applies the LT predicate on the "device_code" field. +func DeviceCodeLT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldDeviceCode, v)) +} + +// DeviceCodeLTE applies the LTE predicate on the "device_code" field. +func DeviceCodeLTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldDeviceCode, v)) +} + +// DeviceCodeContains applies the Contains predicate on the "device_code" field. +func DeviceCodeContains(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContains(FieldDeviceCode, v)) +} + +// DeviceCodeHasPrefix applies the HasPrefix predicate on the "device_code" field. +func DeviceCodeHasPrefix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasPrefix(FieldDeviceCode, v)) +} + +// DeviceCodeHasSuffix applies the HasSuffix predicate on the "device_code" field. +func DeviceCodeHasSuffix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasSuffix(FieldDeviceCode, v)) +} + +// DeviceCodeEqualFold applies the EqualFold predicate on the "device_code" field. +func DeviceCodeEqualFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEqualFold(FieldDeviceCode, v)) +} + +// DeviceCodeContainsFold applies the ContainsFold predicate on the "device_code" field. 
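+// ContainsFold is the case-insensitive variant of Contains: it matches
+// device_code values regardless of letter case.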
+func DeviceCodeContainsFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContainsFold(FieldDeviceCode, v)) +} + +// ClientIDEQ applies the EQ predicate on the "client_id" field. +func ClientIDEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldClientID, v)) +} + +// ClientIDNEQ applies the NEQ predicate on the "client_id" field. +func ClientIDNEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldClientID, v)) +} + +// ClientIDIn applies the In predicate on the "client_id" field. +func ClientIDIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldClientID, vs...)) +} + +// ClientIDNotIn applies the NotIn predicate on the "client_id" field. +func ClientIDNotIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldClientID, vs...)) +} + +// ClientIDGT applies the GT predicate on the "client_id" field. +func ClientIDGT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldClientID, v)) +} + +// ClientIDGTE applies the GTE predicate on the "client_id" field. +func ClientIDGTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldClientID, v)) +} + +// ClientIDLT applies the LT predicate on the "client_id" field. +func ClientIDLT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldClientID, v)) +} + +// ClientIDLTE applies the LTE predicate on the "client_id" field. +func ClientIDLTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldClientID, v)) +} + +// ClientIDContains applies the Contains predicate on the "client_id" field. +func ClientIDContains(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContains(FieldClientID, v)) +} + +// ClientIDHasPrefix applies the HasPrefix predicate on the "client_id" field. +func ClientIDHasPrefix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasPrefix(FieldClientID, v)) +} + +// ClientIDHasSuffix applies the HasSuffix predicate on the "client_id" field. +func ClientIDHasSuffix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasSuffix(FieldClientID, v)) +} + +// ClientIDEqualFold applies the EqualFold predicate on the "client_id" field. +func ClientIDEqualFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEqualFold(FieldClientID, v)) +} + +// ClientIDContainsFold applies the ContainsFold predicate on the "client_id" field. +func ClientIDContainsFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContainsFold(FieldClientID, v)) +} + +// ClientSecretEQ applies the EQ predicate on the "client_secret" field. +func ClientSecretEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldClientSecret, v)) +} + +// ClientSecretNEQ applies the NEQ predicate on the "client_secret" field. +func ClientSecretNEQ(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldClientSecret, v)) +} + +// ClientSecretIn applies the In predicate on the "client_secret" field. +func ClientSecretIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldClientSecret, vs...)) +} + +// ClientSecretNotIn applies the NotIn predicate on the "client_secret" field. 
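+// For example, ClientSecretNotIn("a", "b") excludes rows whose
+// client_secret is either "a" or "b".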
+func ClientSecretNotIn(vs ...string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldClientSecret, vs...)) +} + +// ClientSecretGT applies the GT predicate on the "client_secret" field. +func ClientSecretGT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldClientSecret, v)) +} + +// ClientSecretGTE applies the GTE predicate on the "client_secret" field. +func ClientSecretGTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldClientSecret, v)) +} + +// ClientSecretLT applies the LT predicate on the "client_secret" field. +func ClientSecretLT(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldClientSecret, v)) +} + +// ClientSecretLTE applies the LTE predicate on the "client_secret" field. +func ClientSecretLTE(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldClientSecret, v)) +} + +// ClientSecretContains applies the Contains predicate on the "client_secret" field. +func ClientSecretContains(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContains(FieldClientSecret, v)) +} + +// ClientSecretHasPrefix applies the HasPrefix predicate on the "client_secret" field. +func ClientSecretHasPrefix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasPrefix(FieldClientSecret, v)) +} + +// ClientSecretHasSuffix applies the HasSuffix predicate on the "client_secret" field. +func ClientSecretHasSuffix(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldHasSuffix(FieldClientSecret, v)) +} + +// ClientSecretEqualFold applies the EqualFold predicate on the "client_secret" field. +func ClientSecretEqualFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEqualFold(FieldClientSecret, v)) +} + +// ClientSecretContainsFold applies the ContainsFold predicate on the "client_secret" field. +func ClientSecretContainsFold(v string) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldContainsFold(FieldClientSecret, v)) +} + +// ScopesIsNil applies the IsNil predicate on the "scopes" field. +func ScopesIsNil() predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIsNull(FieldScopes)) +} + +// ScopesNotNil applies the NotNil predicate on the "scopes" field. +func ScopesNotNil() predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotNull(FieldScopes)) +} + +// ExpiryEQ applies the EQ predicate on the "expiry" field. +func ExpiryEQ(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldEQ(FieldExpiry, v)) +} + +// ExpiryNEQ applies the NEQ predicate on the "expiry" field. +func ExpiryNEQ(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNEQ(FieldExpiry, v)) +} + +// ExpiryIn applies the In predicate on the "expiry" field. +func ExpiryIn(vs ...time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldIn(FieldExpiry, vs...)) +} + +// ExpiryNotIn applies the NotIn predicate on the "expiry" field. +func ExpiryNotIn(vs ...time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldNotIn(FieldExpiry, vs...)) +} + +// ExpiryGT applies the GT predicate on the "expiry" field. +func ExpiryGT(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGT(FieldExpiry, v)) +} + +// ExpiryGTE applies the GTE predicate on the "expiry" field. 
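+// The time predicates are convenient for expiry-window queries; a sketch
+// (client being the generated *db.Client):
+//
+//	client.DeviceRequest.Query().Where(devicerequest.ExpiryLT(time.Now()))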
+func ExpiryGTE(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldGTE(FieldExpiry, v)) +} + +// ExpiryLT applies the LT predicate on the "expiry" field. +func ExpiryLT(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLT(FieldExpiry, v)) +} + +// ExpiryLTE applies the LTE predicate on the "expiry" field. +func ExpiryLTE(v time.Time) predicate.DeviceRequest { + return predicate.DeviceRequest(sql.FieldLTE(FieldExpiry, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.DeviceRequest) predicate.DeviceRequest { + return predicate.DeviceRequest(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DeviceRequest) predicate.DeviceRequest { + return predicate.DeviceRequest(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.DeviceRequest) predicate.DeviceRequest { + return predicate.DeviceRequest(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_create.go new file mode 100644 index 00000000..5b182d24 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_create.go @@ -0,0 +1,262 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicerequest" +) + +// DeviceRequestCreate is the builder for creating a DeviceRequest entity. +type DeviceRequestCreate struct { + config + mutation *DeviceRequestMutation + hooks []Hook +} + +// SetUserCode sets the "user_code" field. +func (drc *DeviceRequestCreate) SetUserCode(s string) *DeviceRequestCreate { + drc.mutation.SetUserCode(s) + return drc +} + +// SetDeviceCode sets the "device_code" field. +func (drc *DeviceRequestCreate) SetDeviceCode(s string) *DeviceRequestCreate { + drc.mutation.SetDeviceCode(s) + return drc +} + +// SetClientID sets the "client_id" field. +func (drc *DeviceRequestCreate) SetClientID(s string) *DeviceRequestCreate { + drc.mutation.SetClientID(s) + return drc +} + +// SetClientSecret sets the "client_secret" field. +func (drc *DeviceRequestCreate) SetClientSecret(s string) *DeviceRequestCreate { + drc.mutation.SetClientSecret(s) + return drc +} + +// SetScopes sets the "scopes" field. +func (drc *DeviceRequestCreate) SetScopes(s []string) *DeviceRequestCreate { + drc.mutation.SetScopes(s) + return drc +} + +// SetExpiry sets the "expiry" field. +func (drc *DeviceRequestCreate) SetExpiry(t time.Time) *DeviceRequestCreate { + drc.mutation.SetExpiry(t) + return drc +} + +// Mutation returns the DeviceRequestMutation object of the builder. +func (drc *DeviceRequestCreate) Mutation() *DeviceRequestMutation { + return drc.mutation +} + +// Save creates the DeviceRequest in the database. +func (drc *DeviceRequestCreate) Save(ctx context.Context) (*DeviceRequest, error) { + return withHooks(ctx, drc.sqlSave, drc.mutation, drc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
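+// A minimal create sketch ending in SaveX; the field values are
+// placeholders and client is the generated *db.Client:
+//
+//	dr := client.DeviceRequest.Create().
+//		SetUserCode("ABCD-EFGH").
+//		SetDeviceCode("device-code").
+//		SetClientID("client-id").
+//		SetClientSecret("secret").
+//		SetExpiry(time.Now().Add(5 * time.Minute)).
+//		SaveX(ctx)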
+func (drc *DeviceRequestCreate) SaveX(ctx context.Context) *DeviceRequest { + v, err := drc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (drc *DeviceRequestCreate) Exec(ctx context.Context) error { + _, err := drc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (drc *DeviceRequestCreate) ExecX(ctx context.Context) { + if err := drc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (drc *DeviceRequestCreate) check() error { + if _, ok := drc.mutation.UserCode(); !ok { + return &ValidationError{Name: "user_code", err: errors.New(`db: missing required field "DeviceRequest.user_code"`)} + } + if v, ok := drc.mutation.UserCode(); ok { + if err := devicerequest.UserCodeValidator(v); err != nil { + return &ValidationError{Name: "user_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.user_code": %w`, err)} + } + } + if _, ok := drc.mutation.DeviceCode(); !ok { + return &ValidationError{Name: "device_code", err: errors.New(`db: missing required field "DeviceRequest.device_code"`)} + } + if v, ok := drc.mutation.DeviceCode(); ok { + if err := devicerequest.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.device_code": %w`, err)} + } + } + if _, ok := drc.mutation.ClientID(); !ok { + return &ValidationError{Name: "client_id", err: errors.New(`db: missing required field "DeviceRequest.client_id"`)} + } + if v, ok := drc.mutation.ClientID(); ok { + if err := devicerequest.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_id": %w`, err)} + } + } + if _, ok := drc.mutation.ClientSecret(); !ok { + return &ValidationError{Name: "client_secret", err: errors.New(`db: missing required field "DeviceRequest.client_secret"`)} + } + if v, ok := drc.mutation.ClientSecret(); ok { + if err := devicerequest.ClientSecretValidator(v); err != nil { + return &ValidationError{Name: "client_secret", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_secret": %w`, err)} + } + } + if _, ok := drc.mutation.Expiry(); !ok { + return &ValidationError{Name: "expiry", err: errors.New(`db: missing required field "DeviceRequest.expiry"`)} + } + return nil +} + +func (drc *DeviceRequestCreate) sqlSave(ctx context.Context) (*DeviceRequest, error) { + if err := drc.check(); err != nil { + return nil, err + } + _node, _spec := drc.createSpec() + if err := sqlgraph.CreateNode(ctx, drc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + drc.mutation.id = &_node.ID + drc.mutation.done = true + return _node, nil +} + +func (drc *DeviceRequestCreate) createSpec() (*DeviceRequest, *sqlgraph.CreateSpec) { + var ( + _node = &DeviceRequest{config: drc.config} + _spec = sqlgraph.NewCreateSpec(devicerequest.Table, sqlgraph.NewFieldSpec(devicerequest.FieldID, field.TypeInt)) + ) + if value, ok := drc.mutation.UserCode(); ok { + _spec.SetField(devicerequest.FieldUserCode, field.TypeString, value) + _node.UserCode = value + } + if value, ok := drc.mutation.DeviceCode(); ok { + _spec.SetField(devicerequest.FieldDeviceCode, field.TypeString, value) + _node.DeviceCode = value + } + if value, ok := 
drc.mutation.ClientID(); ok { + _spec.SetField(devicerequest.FieldClientID, field.TypeString, value) + _node.ClientID = value + } + if value, ok := drc.mutation.ClientSecret(); ok { + _spec.SetField(devicerequest.FieldClientSecret, field.TypeString, value) + _node.ClientSecret = value + } + if value, ok := drc.mutation.Scopes(); ok { + _spec.SetField(devicerequest.FieldScopes, field.TypeJSON, value) + _node.Scopes = value + } + if value, ok := drc.mutation.Expiry(); ok { + _spec.SetField(devicerequest.FieldExpiry, field.TypeTime, value) + _node.Expiry = value + } + return _node, _spec +} + +// DeviceRequestCreateBulk is the builder for creating many DeviceRequest entities in bulk. +type DeviceRequestCreateBulk struct { + config + builders []*DeviceRequestCreate +} + +// Save creates the DeviceRequest entities in the database. +func (drcb *DeviceRequestCreateBulk) Save(ctx context.Context) ([]*DeviceRequest, error) { + specs := make([]*sqlgraph.CreateSpec, len(drcb.builders)) + nodes := make([]*DeviceRequest, len(drcb.builders)) + mutators := make([]Mutator, len(drcb.builders)) + for i := range drcb.builders { + func(i int, root context.Context) { + builder := drcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DeviceRequestMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, drcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, drcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, drcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (drcb *DeviceRequestCreateBulk) SaveX(ctx context.Context) []*DeviceRequest { + v, err := drcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (drcb *DeviceRequestCreateBulk) Exec(ctx context.Context) error { + _, err := drcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (drcb *DeviceRequestCreateBulk) ExecX(ctx context.Context) { + if err := drcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_delete.go new file mode 100644 index 00000000..b92f7798 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
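+//
+// Deletion is predicate-driven. A sketch that removes expired requests,
+// assuming the generated *db.Client exposes the usual Delete entry point:
+//
+//	n, err := client.DeviceRequest.Delete().
+//		Where(devicerequest.ExpiryLT(time.Now())).
+//		Exec(ctx)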
+ +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicerequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// DeviceRequestDelete is the builder for deleting a DeviceRequest entity. +type DeviceRequestDelete struct { + config + hooks []Hook + mutation *DeviceRequestMutation +} + +// Where appends a list predicates to the DeviceRequestDelete builder. +func (drd *DeviceRequestDelete) Where(ps ...predicate.DeviceRequest) *DeviceRequestDelete { + drd.mutation.Where(ps...) + return drd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (drd *DeviceRequestDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, drd.sqlExec, drd.mutation, drd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (drd *DeviceRequestDelete) ExecX(ctx context.Context) int { + n, err := drd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (drd *DeviceRequestDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(devicerequest.Table, sqlgraph.NewFieldSpec(devicerequest.FieldID, field.TypeInt)) + if ps := drd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, drd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + drd.mutation.done = true + return affected, err +} + +// DeviceRequestDeleteOne is the builder for deleting a single DeviceRequest entity. +type DeviceRequestDeleteOne struct { + drd *DeviceRequestDelete +} + +// Where appends a list predicates to the DeviceRequestDelete builder. +func (drdo *DeviceRequestDeleteOne) Where(ps ...predicate.DeviceRequest) *DeviceRequestDeleteOne { + drdo.drd.mutation.Where(ps...) + return drdo +} + +// Exec executes the deletion query. +func (drdo *DeviceRequestDeleteOne) Exec(ctx context.Context) error { + n, err := drdo.drd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{devicerequest.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (drdo *DeviceRequestDeleteOne) ExecX(ctx context.Context) { + if err := drdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_query.go new file mode 100644 index 00000000..de6092dc --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicerequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// DeviceRequestQuery is the builder for querying DeviceRequest entities. +type DeviceRequestQuery struct { + config + ctx *QueryContext + order []devicerequest.OrderOption + inters []Interceptor + predicates []predicate.DeviceRequest + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DeviceRequestQuery builder. 
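+// Predicates passed to Where are combined with AND; use devicerequest.Or
+// to group alternatives:
+//
+//	drq.Where(devicerequest.Or(
+//		devicerequest.UserCode("ABCD-EFGH"),
+//		devicerequest.DeviceCode("device-code"),
+//	))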
+func (drq *DeviceRequestQuery) Where(ps ...predicate.DeviceRequest) *DeviceRequestQuery { + drq.predicates = append(drq.predicates, ps...) + return drq +} + +// Limit the number of records to be returned by this query. +func (drq *DeviceRequestQuery) Limit(limit int) *DeviceRequestQuery { + drq.ctx.Limit = &limit + return drq +} + +// Offset to start from. +func (drq *DeviceRequestQuery) Offset(offset int) *DeviceRequestQuery { + drq.ctx.Offset = &offset + return drq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (drq *DeviceRequestQuery) Unique(unique bool) *DeviceRequestQuery { + drq.ctx.Unique = &unique + return drq +} + +// Order specifies how the records should be ordered. +func (drq *DeviceRequestQuery) Order(o ...devicerequest.OrderOption) *DeviceRequestQuery { + drq.order = append(drq.order, o...) + return drq +} + +// First returns the first DeviceRequest entity from the query. +// Returns a *NotFoundError when no DeviceRequest was found. +func (drq *DeviceRequestQuery) First(ctx context.Context) (*DeviceRequest, error) { + nodes, err := drq.Limit(1).All(setContextOp(ctx, drq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{devicerequest.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (drq *DeviceRequestQuery) FirstX(ctx context.Context) *DeviceRequest { + node, err := drq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DeviceRequest ID from the query. +// Returns a *NotFoundError when no DeviceRequest ID was found. +func (drq *DeviceRequestQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = drq.Limit(1).IDs(setContextOp(ctx, drq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{devicerequest.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (drq *DeviceRequestQuery) FirstIDX(ctx context.Context) int { + id, err := drq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DeviceRequest entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DeviceRequest entity is found. +// Returns a *NotFoundError when no DeviceRequest entities are found. +func (drq *DeviceRequestQuery) Only(ctx context.Context) (*DeviceRequest, error) { + nodes, err := drq.Limit(2).All(setContextOp(ctx, drq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{devicerequest.Label} + default: + return nil, &NotSingularError{devicerequest.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (drq *DeviceRequestQuery) OnlyX(ctx context.Context) *DeviceRequest { + node, err := drq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DeviceRequest ID in the query. +// Returns a *NotSingularError when more than one DeviceRequest ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (drq *DeviceRequestQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = drq.Limit(2).IDs(setContextOp(ctx, drq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{devicerequest.Label} + default: + err = &NotSingularError{devicerequest.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (drq *DeviceRequestQuery) OnlyIDX(ctx context.Context) int { + id, err := drq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DeviceRequests. +func (drq *DeviceRequestQuery) All(ctx context.Context) ([]*DeviceRequest, error) { + ctx = setContextOp(ctx, drq.ctx, "All") + if err := drq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*DeviceRequest, *DeviceRequestQuery]() + return withInterceptors[[]*DeviceRequest](ctx, drq, qr, drq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (drq *DeviceRequestQuery) AllX(ctx context.Context) []*DeviceRequest { + nodes, err := drq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DeviceRequest IDs. +func (drq *DeviceRequestQuery) IDs(ctx context.Context) (ids []int, err error) { + if drq.ctx.Unique == nil && drq.path != nil { + drq.Unique(true) + } + ctx = setContextOp(ctx, drq.ctx, "IDs") + if err = drq.Select(devicerequest.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (drq *DeviceRequestQuery) IDsX(ctx context.Context) []int { + ids, err := drq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (drq *DeviceRequestQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, drq.ctx, "Count") + if err := drq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, drq, querierCount[*DeviceRequestQuery](), drq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (drq *DeviceRequestQuery) CountX(ctx context.Context) int { + count, err := drq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (drq *DeviceRequestQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, drq.ctx, "Exist") + switch _, err := drq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (drq *DeviceRequestQuery) ExistX(ctx context.Context) bool { + exist, err := drq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DeviceRequestQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (drq *DeviceRequestQuery) Clone() *DeviceRequestQuery { + if drq == nil { + return nil + } + return &DeviceRequestQuery{ + config: drq.config, + ctx: drq.ctx.Clone(), + order: append([]devicerequest.OrderOption{}, drq.order...), + inters: append([]Interceptor{}, drq.inters...), + predicates: append([]predicate.DeviceRequest{}, drq.predicates...), + // clone intermediate query. 
+ sql: drq.sql.Clone(), + path: drq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// UserCode string `json:"user_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.DeviceRequest.Query(). +// GroupBy(devicerequest.FieldUserCode). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (drq *DeviceRequestQuery) GroupBy(field string, fields ...string) *DeviceRequestGroupBy { + drq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DeviceRequestGroupBy{build: drq} + grbuild.flds = &drq.ctx.Fields + grbuild.label = devicerequest.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// UserCode string `json:"user_code,omitempty"` +// } +// +// client.DeviceRequest.Query(). +// Select(devicerequest.FieldUserCode). +// Scan(ctx, &v) +func (drq *DeviceRequestQuery) Select(fields ...string) *DeviceRequestSelect { + drq.ctx.Fields = append(drq.ctx.Fields, fields...) + sbuild := &DeviceRequestSelect{DeviceRequestQuery: drq} + sbuild.label = devicerequest.Label + sbuild.flds, sbuild.scan = &drq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DeviceRequestSelect configured with the given aggregations. +func (drq *DeviceRequestQuery) Aggregate(fns ...AggregateFunc) *DeviceRequestSelect { + return drq.Select().Aggregate(fns...) +} + +func (drq *DeviceRequestQuery) prepareQuery(ctx context.Context) error { + for _, inter := range drq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, drq); err != nil { + return err + } + } + } + for _, f := range drq.ctx.Fields { + if !devicerequest.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if drq.path != nil { + prev, err := drq.path(ctx) + if err != nil { + return err + } + drq.sql = prev + } + return nil +} + +func (drq *DeviceRequestQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DeviceRequest, error) { + var ( + nodes = []*DeviceRequest{} + _spec = drq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DeviceRequest).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &DeviceRequest{config: drq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, drq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (drq *DeviceRequestQuery) sqlCount(ctx context.Context) (int, error) { + _spec := drq.querySpec() + _spec.Node.Columns = drq.ctx.Fields + if len(drq.ctx.Fields) > 0 { + _spec.Unique = drq.ctx.Unique != nil && *drq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, drq.driver, _spec) +} + +func (drq *DeviceRequestQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(devicerequest.Table, devicerequest.Columns, sqlgraph.NewFieldSpec(devicerequest.FieldID, field.TypeInt)) + _spec.From = drq.sql + if unique := drq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else 
if drq.path != nil { + _spec.Unique = true + } + if fields := drq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, devicerequest.FieldID) + for i := range fields { + if fields[i] != devicerequest.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := drq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := drq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := drq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := drq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (drq *DeviceRequestQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(drq.driver.Dialect()) + t1 := builder.Table(devicerequest.Table) + columns := drq.ctx.Fields + if len(columns) == 0 { + columns = devicerequest.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if drq.sql != nil { + selector = drq.sql + selector.Select(selector.Columns(columns...)...) + } + if drq.ctx.Unique != nil && *drq.ctx.Unique { + selector.Distinct() + } + for _, p := range drq.predicates { + p(selector) + } + for _, p := range drq.order { + p(selector) + } + if offset := drq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := drq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DeviceRequestGroupBy is the group-by builder for DeviceRequest entities. +type DeviceRequestGroupBy struct { + selector + build *DeviceRequestQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (drgb *DeviceRequestGroupBy) Aggregate(fns ...AggregateFunc) *DeviceRequestGroupBy { + drgb.fns = append(drgb.fns, fns...) + return drgb +} + +// Scan applies the selector query and scans the result into the given value. +func (drgb *DeviceRequestGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, drgb.build.ctx, "GroupBy") + if err := drgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DeviceRequestQuery, *DeviceRequestGroupBy](ctx, drgb.build, drgb, drgb.build.inters, v) +} + +func (drgb *DeviceRequestGroupBy) sqlScan(ctx context.Context, root *DeviceRequestQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(drgb.fns)) + for _, fn := range drgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*drgb.flds)+len(drgb.fns)) + for _, f := range *drgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*drgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := drgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DeviceRequestSelect is the builder for selecting fields of DeviceRequest entities. 
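+// A selection sketch that scans a single column into a slice (client being
+// the generated *db.Client):
+//
+//	var codes []string
+//	err := client.DeviceRequest.Query().
+//		Select(devicerequest.FieldUserCode).
+//		Scan(ctx, &codes)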
+type DeviceRequestSelect struct { + *DeviceRequestQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (drs *DeviceRequestSelect) Aggregate(fns ...AggregateFunc) *DeviceRequestSelect { + drs.fns = append(drs.fns, fns...) + return drs +} + +// Scan applies the selector query and scans the result into the given value. +func (drs *DeviceRequestSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, drs.ctx, "Select") + if err := drs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DeviceRequestQuery, *DeviceRequestSelect](ctx, drs.DeviceRequestQuery, drs, drs.inters, v) +} + +func (drs *DeviceRequestSelect) sqlScan(ctx context.Context, root *DeviceRequestQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(drs.fns)) + for _, fn := range drs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*drs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := drs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_update.go new file mode 100644 index 00000000..1b48c36e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicerequest_update.go @@ -0,0 +1,381 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicerequest" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// DeviceRequestUpdate is the builder for updating DeviceRequest entities. +type DeviceRequestUpdate struct { + config + hooks []Hook + mutation *DeviceRequestMutation +} + +// Where appends a list predicates to the DeviceRequestUpdate builder. +func (dru *DeviceRequestUpdate) Where(ps ...predicate.DeviceRequest) *DeviceRequestUpdate { + dru.mutation.Where(ps...) + return dru +} + +// SetUserCode sets the "user_code" field. +func (dru *DeviceRequestUpdate) SetUserCode(s string) *DeviceRequestUpdate { + dru.mutation.SetUserCode(s) + return dru +} + +// SetDeviceCode sets the "device_code" field. +func (dru *DeviceRequestUpdate) SetDeviceCode(s string) *DeviceRequestUpdate { + dru.mutation.SetDeviceCode(s) + return dru +} + +// SetClientID sets the "client_id" field. +func (dru *DeviceRequestUpdate) SetClientID(s string) *DeviceRequestUpdate { + dru.mutation.SetClientID(s) + return dru +} + +// SetClientSecret sets the "client_secret" field. +func (dru *DeviceRequestUpdate) SetClientSecret(s string) *DeviceRequestUpdate { + dru.mutation.SetClientSecret(s) + return dru +} + +// SetScopes sets the "scopes" field. +func (dru *DeviceRequestUpdate) SetScopes(s []string) *DeviceRequestUpdate { + dru.mutation.SetScopes(s) + return dru +} + +// AppendScopes appends s to the "scopes" field. +func (dru *DeviceRequestUpdate) AppendScopes(s []string) *DeviceRequestUpdate { + dru.mutation.AppendScopes(s) + return dru +} + +// ClearScopes clears the value of the "scopes" field. 
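+// Clearing stores NULL in the underlying JSON column on the next Save.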
+func (dru *DeviceRequestUpdate) ClearScopes() *DeviceRequestUpdate { + dru.mutation.ClearScopes() + return dru +} + +// SetExpiry sets the "expiry" field. +func (dru *DeviceRequestUpdate) SetExpiry(t time.Time) *DeviceRequestUpdate { + dru.mutation.SetExpiry(t) + return dru +} + +// Mutation returns the DeviceRequestMutation object of the builder. +func (dru *DeviceRequestUpdate) Mutation() *DeviceRequestMutation { + return dru.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dru *DeviceRequestUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, dru.sqlSave, dru.mutation, dru.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dru *DeviceRequestUpdate) SaveX(ctx context.Context) int { + affected, err := dru.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (dru *DeviceRequestUpdate) Exec(ctx context.Context) error { + _, err := dru.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dru *DeviceRequestUpdate) ExecX(ctx context.Context) { + if err := dru.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dru *DeviceRequestUpdate) check() error { + if v, ok := dru.mutation.UserCode(); ok { + if err := devicerequest.UserCodeValidator(v); err != nil { + return &ValidationError{Name: "user_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.user_code": %w`, err)} + } + } + if v, ok := dru.mutation.DeviceCode(); ok { + if err := devicerequest.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.device_code": %w`, err)} + } + } + if v, ok := dru.mutation.ClientID(); ok { + if err := devicerequest.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_id": %w`, err)} + } + } + if v, ok := dru.mutation.ClientSecret(); ok { + if err := devicerequest.ClientSecretValidator(v); err != nil { + return &ValidationError{Name: "client_secret", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_secret": %w`, err)} + } + } + return nil +} + +func (dru *DeviceRequestUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := dru.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(devicerequest.Table, devicerequest.Columns, sqlgraph.NewFieldSpec(devicerequest.FieldID, field.TypeInt)) + if ps := dru.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dru.mutation.UserCode(); ok { + _spec.SetField(devicerequest.FieldUserCode, field.TypeString, value) + } + if value, ok := dru.mutation.DeviceCode(); ok { + _spec.SetField(devicerequest.FieldDeviceCode, field.TypeString, value) + } + if value, ok := dru.mutation.ClientID(); ok { + _spec.SetField(devicerequest.FieldClientID, field.TypeString, value) + } + if value, ok := dru.mutation.ClientSecret(); ok { + _spec.SetField(devicerequest.FieldClientSecret, field.TypeString, value) + } + if value, ok := dru.mutation.Scopes(); ok { + _spec.SetField(devicerequest.FieldScopes, field.TypeJSON, value) + } + if value, ok := dru.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, devicerequest.FieldScopes, 
value) + }) + } + if dru.mutation.ScopesCleared() { + _spec.ClearField(devicerequest.FieldScopes, field.TypeJSON) + } + if value, ok := dru.mutation.Expiry(); ok { + _spec.SetField(devicerequest.FieldExpiry, field.TypeTime, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, dru.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{devicerequest.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + dru.mutation.done = true + return n, nil +} + +// DeviceRequestUpdateOne is the builder for updating a single DeviceRequest entity. +type DeviceRequestUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DeviceRequestMutation +} + +// SetUserCode sets the "user_code" field. +func (druo *DeviceRequestUpdateOne) SetUserCode(s string) *DeviceRequestUpdateOne { + druo.mutation.SetUserCode(s) + return druo +} + +// SetDeviceCode sets the "device_code" field. +func (druo *DeviceRequestUpdateOne) SetDeviceCode(s string) *DeviceRequestUpdateOne { + druo.mutation.SetDeviceCode(s) + return druo +} + +// SetClientID sets the "client_id" field. +func (druo *DeviceRequestUpdateOne) SetClientID(s string) *DeviceRequestUpdateOne { + druo.mutation.SetClientID(s) + return druo +} + +// SetClientSecret sets the "client_secret" field. +func (druo *DeviceRequestUpdateOne) SetClientSecret(s string) *DeviceRequestUpdateOne { + druo.mutation.SetClientSecret(s) + return druo +} + +// SetScopes sets the "scopes" field. +func (druo *DeviceRequestUpdateOne) SetScopes(s []string) *DeviceRequestUpdateOne { + druo.mutation.SetScopes(s) + return druo +} + +// AppendScopes appends s to the "scopes" field. +func (druo *DeviceRequestUpdateOne) AppendScopes(s []string) *DeviceRequestUpdateOne { + druo.mutation.AppendScopes(s) + return druo +} + +// ClearScopes clears the value of the "scopes" field. +func (druo *DeviceRequestUpdateOne) ClearScopes() *DeviceRequestUpdateOne { + druo.mutation.ClearScopes() + return druo +} + +// SetExpiry sets the "expiry" field. +func (druo *DeviceRequestUpdateOne) SetExpiry(t time.Time) *DeviceRequestUpdateOne { + druo.mutation.SetExpiry(t) + return druo +} + +// Mutation returns the DeviceRequestMutation object of the builder. +func (druo *DeviceRequestUpdateOne) Mutation() *DeviceRequestMutation { + return druo.mutation +} + +// Where appends a list predicates to the DeviceRequestUpdate builder. +func (druo *DeviceRequestUpdateOne) Where(ps ...predicate.DeviceRequest) *DeviceRequestUpdateOne { + druo.mutation.Where(ps...) + return druo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (druo *DeviceRequestUpdateOne) Select(field string, fields ...string) *DeviceRequestUpdateOne { + druo.fields = append([]string{field}, fields...) + return druo +} + +// Save executes the query and returns the updated DeviceRequest entity. +func (druo *DeviceRequestUpdateOne) Save(ctx context.Context) (*DeviceRequest, error) { + return withHooks(ctx, druo.sqlSave, druo.mutation, druo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (druo *DeviceRequestUpdateOne) SaveX(ctx context.Context) *DeviceRequest { + node, err := druo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. 
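+// An update-one sketch that rotates the user code, where dr is a
+// previously loaded *DeviceRequest:
+//
+//	dr2, err := dr.Update().
+//		SetUserCode("WXYZ-1234").
+//		Save(ctx)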
+func (druo *DeviceRequestUpdateOne) Exec(ctx context.Context) error { + _, err := druo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (druo *DeviceRequestUpdateOne) ExecX(ctx context.Context) { + if err := druo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (druo *DeviceRequestUpdateOne) check() error { + if v, ok := druo.mutation.UserCode(); ok { + if err := devicerequest.UserCodeValidator(v); err != nil { + return &ValidationError{Name: "user_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.user_code": %w`, err)} + } + } + if v, ok := druo.mutation.DeviceCode(); ok { + if err := devicerequest.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.device_code": %w`, err)} + } + } + if v, ok := druo.mutation.ClientID(); ok { + if err := devicerequest.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_id": %w`, err)} + } + } + if v, ok := druo.mutation.ClientSecret(); ok { + if err := devicerequest.ClientSecretValidator(v); err != nil { + return &ValidationError{Name: "client_secret", err: fmt.Errorf(`db: validator failed for field "DeviceRequest.client_secret": %w`, err)} + } + } + return nil +} + +func (druo *DeviceRequestUpdateOne) sqlSave(ctx context.Context) (_node *DeviceRequest, err error) { + if err := druo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(devicerequest.Table, devicerequest.Columns, sqlgraph.NewFieldSpec(devicerequest.FieldID, field.TypeInt)) + id, ok := druo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "DeviceRequest.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := druo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, devicerequest.FieldID) + for _, f := range fields { + if !devicerequest.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != devicerequest.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := druo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := druo.mutation.UserCode(); ok { + _spec.SetField(devicerequest.FieldUserCode, field.TypeString, value) + } + if value, ok := druo.mutation.DeviceCode(); ok { + _spec.SetField(devicerequest.FieldDeviceCode, field.TypeString, value) + } + if value, ok := druo.mutation.ClientID(); ok { + _spec.SetField(devicerequest.FieldClientID, field.TypeString, value) + } + if value, ok := druo.mutation.ClientSecret(); ok { + _spec.SetField(devicerequest.FieldClientSecret, field.TypeString, value) + } + if value, ok := druo.mutation.Scopes(); ok { + _spec.SetField(devicerequest.FieldScopes, field.TypeJSON, value) + } + if value, ok := druo.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, devicerequest.FieldScopes, value) + }) + } + if druo.mutation.ScopesCleared() { + _spec.ClearField(devicerequest.FieldScopes, field.TypeJSON) + } + if value, ok := druo.mutation.Expiry(); ok { + _spec.SetField(devicerequest.FieldExpiry, field.TypeTime, value) + } + _node = 
&DeviceRequest{config: druo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, druo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{devicerequest.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + druo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken.go new file mode 100644 index 00000000..0eda024e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken.go @@ -0,0 +1,187 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/devicetoken" +) + +// DeviceToken is the model entity for the DeviceToken schema. +type DeviceToken struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // DeviceCode holds the value of the "device_code" field. + DeviceCode string `json:"device_code,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Token holds the value of the "token" field. + Token *[]byte `json:"token,omitempty"` + // Expiry holds the value of the "expiry" field. + Expiry time.Time `json:"expiry,omitempty"` + // LastRequest holds the value of the "last_request" field. + LastRequest time.Time `json:"last_request,omitempty"` + // PollInterval holds the value of the "poll_interval" field. + PollInterval int `json:"poll_interval,omitempty"` + // CodeChallenge holds the value of the "code_challenge" field. + CodeChallenge string `json:"code_challenge,omitempty"` + // CodeChallengeMethod holds the value of the "code_challenge_method" field. + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*DeviceToken) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case devicetoken.FieldToken: + values[i] = new([]byte) + case devicetoken.FieldID, devicetoken.FieldPollInterval: + values[i] = new(sql.NullInt64) + case devicetoken.FieldDeviceCode, devicetoken.FieldStatus, devicetoken.FieldCodeChallenge, devicetoken.FieldCodeChallengeMethod: + values[i] = new(sql.NullString) + case devicetoken.FieldExpiry, devicetoken.FieldLastRequest: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DeviceToken fields. 
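+// The columns must line up with the values produced by scanValues above;
+// columns that are not part of the static schema fall through to
+// selectValues, which backs dynamically selected values.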
+func (dt *DeviceToken) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case devicetoken.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + dt.ID = int(value.Int64) + case devicetoken.FieldDeviceCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field device_code", values[i]) + } else if value.Valid { + dt.DeviceCode = value.String + } + case devicetoken.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + dt.Status = value.String + } + case devicetoken.FieldToken: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value != nil { + dt.Token = value + } + case devicetoken.FieldExpiry: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expiry", values[i]) + } else if value.Valid { + dt.Expiry = value.Time + } + case devicetoken.FieldLastRequest: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_request", values[i]) + } else if value.Valid { + dt.LastRequest = value.Time + } + case devicetoken.FieldPollInterval: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field poll_interval", values[i]) + } else if value.Valid { + dt.PollInterval = int(value.Int64) + } + case devicetoken.FieldCodeChallenge: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge", values[i]) + } else if value.Valid { + dt.CodeChallenge = value.String + } + case devicetoken.FieldCodeChallengeMethod: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code_challenge_method", values[i]) + } else if value.Valid { + dt.CodeChallengeMethod = value.String + } + default: + dt.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the DeviceToken. +// This includes values selected through modifiers, order, etc. +func (dt *DeviceToken) Value(name string) (ent.Value, error) { + return dt.selectValues.Get(name) +} + +// Update returns a builder for updating this DeviceToken. +// Note that you need to call DeviceToken.Unwrap() before calling this method if this DeviceToken +// was returned from a transaction, and the transaction was committed or rolled back. +func (dt *DeviceToken) Update() *DeviceTokenUpdateOne { + return NewDeviceTokenClient(dt.config).UpdateOne(dt) +} + +// Unwrap unwraps the DeviceToken entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (dt *DeviceToken) Unwrap() *DeviceToken { + _tx, ok := dt.config.driver.(*txDriver) + if !ok { + panic("db: DeviceToken is not a transactional entity") + } + dt.config.driver = _tx.drv + return dt +} + +// String implements the fmt.Stringer. 
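+// The output is meant for logging and debugging; a sketch of its shape
+// (values illustrative):
+//
+//	fmt.Println(dt) // DeviceToken(id=1, device_code=..., status=..., ...)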
+func (dt *DeviceToken) String() string { + var builder strings.Builder + builder.WriteString("DeviceToken(") + builder.WriteString(fmt.Sprintf("id=%v, ", dt.ID)) + builder.WriteString("device_code=") + builder.WriteString(dt.DeviceCode) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(dt.Status) + builder.WriteString(", ") + if v := dt.Token; v != nil { + builder.WriteString("token=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("expiry=") + builder.WriteString(dt.Expiry.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("last_request=") + builder.WriteString(dt.LastRequest.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("poll_interval=") + builder.WriteString(fmt.Sprintf("%v", dt.PollInterval)) + builder.WriteString(", ") + builder.WriteString("code_challenge=") + builder.WriteString(dt.CodeChallenge) + builder.WriteString(", ") + builder.WriteString("code_challenge_method=") + builder.WriteString(dt.CodeChallengeMethod) + builder.WriteByte(')') + return builder.String() +} + +// DeviceTokens is a parsable slice of DeviceToken. +type DeviceTokens []*DeviceToken diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/BUILD new file mode 100644 index 00000000..b5e7b3ac --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "devicetoken", + srcs = [ + "devicetoken.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken", + importpath = "github.com/dexidp/dex/storage/ent/db/devicetoken", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/devicetoken.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/devicetoken.go new file mode 100644 index 00000000..72972440 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/devicetoken.go @@ -0,0 +1,109 @@ +// Code generated by ent, DO NOT EDIT. + +package devicetoken + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the devicetoken type in the database. + Label = "device_token" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldDeviceCode holds the string denoting the device_code field in the database. + FieldDeviceCode = "device_code" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldExpiry holds the string denoting the expiry field in the database. + FieldExpiry = "expiry" + // FieldLastRequest holds the string denoting the last_request field in the database. + FieldLastRequest = "last_request" + // FieldPollInterval holds the string denoting the poll_interval field in the database. + FieldPollInterval = "poll_interval" + // FieldCodeChallenge holds the string denoting the code_challenge field in the database. + FieldCodeChallenge = "code_challenge" + // FieldCodeChallengeMethod holds the string denoting the code_challenge_method field in the database. 
+ FieldCodeChallengeMethod = "code_challenge_method" + // Table holds the table name of the devicetoken in the database. + Table = "device_tokens" +) + +// Columns holds all SQL columns for devicetoken fields. +var Columns = []string{ + FieldID, + FieldDeviceCode, + FieldStatus, + FieldToken, + FieldExpiry, + FieldLastRequest, + FieldPollInterval, + FieldCodeChallenge, + FieldCodeChallengeMethod, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save. + DeviceCodeValidator func(string) error + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCodeChallenge holds the default value on creation for the "code_challenge" field. + DefaultCodeChallenge string + // DefaultCodeChallengeMethod holds the default value on creation for the "code_challenge_method" field. + DefaultCodeChallengeMethod string +) + +// OrderOption defines the ordering options for the DeviceToken queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByDeviceCode orders the results by the device_code field. +func ByDeviceCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeviceCode, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByExpiry orders the results by the expiry field. +func ByExpiry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiry, opts...).ToFunc() +} + +// ByLastRequest orders the results by the last_request field. +func ByLastRequest(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastRequest, opts...).ToFunc() +} + +// ByPollInterval orders the results by the poll_interval field. +func ByPollInterval(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPollInterval, opts...).ToFunc() +} + +// ByCodeChallenge orders the results by the code_challenge field. +func ByCodeChallenge(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallenge, opts...).ToFunc() +} + +// ByCodeChallengeMethod orders the results by the code_challenge_method field. +func ByCodeChallengeMethod(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCodeChallengeMethod, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/where.go new file mode 100644 index 00000000..f1eddd80 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken/where.go @@ -0,0 +1,557 @@ +// Code generated by ent, DO NOT EDIT. + +package devicetoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
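+// Predicates from this package are passed to the Where methods of the
+// generated builders; a minimal sketch (client is an assumed *db.Client):
+//
+//	dt, err := client.DeviceToken.Query().
+//		Where(devicetoken.IDEQ(1)).
+//		Only(ctx)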
+func IDEQ(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldID, id)) +} + +// DeviceCode applies equality check predicate on the "device_code" field. It's identical to DeviceCodeEQ. +func DeviceCode(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldDeviceCode, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldStatus, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldToken, v)) +} + +// Expiry applies equality check predicate on the "expiry" field. It's identical to ExpiryEQ. +func Expiry(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldExpiry, v)) +} + +// LastRequest applies equality check predicate on the "last_request" field. It's identical to LastRequestEQ. +func LastRequest(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldLastRequest, v)) +} + +// PollInterval applies equality check predicate on the "poll_interval" field. It's identical to PollIntervalEQ. +func PollInterval(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldPollInterval, v)) +} + +// CodeChallenge applies equality check predicate on the "code_challenge" field. It's identical to CodeChallengeEQ. +func CodeChallenge(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeMethod applies equality check predicate on the "code_challenge_method" field. It's identical to CodeChallengeMethodEQ. +func CodeChallengeMethod(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// DeviceCodeEQ applies the EQ predicate on the "device_code" field. +func DeviceCodeEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldDeviceCode, v)) +} + +// DeviceCodeNEQ applies the NEQ predicate on the "device_code" field. +func DeviceCodeNEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldDeviceCode, v)) +} + +// DeviceCodeIn applies the In predicate on the "device_code" field. 
+func DeviceCodeIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldDeviceCode, vs...)) +} + +// DeviceCodeNotIn applies the NotIn predicate on the "device_code" field. +func DeviceCodeNotIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldDeviceCode, vs...)) +} + +// DeviceCodeGT applies the GT predicate on the "device_code" field. +func DeviceCodeGT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldDeviceCode, v)) +} + +// DeviceCodeGTE applies the GTE predicate on the "device_code" field. +func DeviceCodeGTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldDeviceCode, v)) +} + +// DeviceCodeLT applies the LT predicate on the "device_code" field. +func DeviceCodeLT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldDeviceCode, v)) +} + +// DeviceCodeLTE applies the LTE predicate on the "device_code" field. +func DeviceCodeLTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldDeviceCode, v)) +} + +// DeviceCodeContains applies the Contains predicate on the "device_code" field. +func DeviceCodeContains(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContains(FieldDeviceCode, v)) +} + +// DeviceCodeHasPrefix applies the HasPrefix predicate on the "device_code" field. +func DeviceCodeHasPrefix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasPrefix(FieldDeviceCode, v)) +} + +// DeviceCodeHasSuffix applies the HasSuffix predicate on the "device_code" field. +func DeviceCodeHasSuffix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasSuffix(FieldDeviceCode, v)) +} + +// DeviceCodeEqualFold applies the EqualFold predicate on the "device_code" field. +func DeviceCodeEqualFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEqualFold(FieldDeviceCode, v)) +} + +// DeviceCodeContainsFold applies the ContainsFold predicate on the "device_code" field. +func DeviceCodeContainsFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContainsFold(FieldDeviceCode, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. 
+func StatusLTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContainsFold(FieldStatus, v)) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldToken, v)) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldToken, v)) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...[]byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldToken, vs...)) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...[]byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldToken, vs...)) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldToken, v)) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldToken, v)) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldToken, v)) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v []byte) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldToken, v)) +} + +// TokenIsNil applies the IsNil predicate on the "token" field. +func TokenIsNil() predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIsNull(FieldToken)) +} + +// TokenNotNil applies the NotNil predicate on the "token" field. +func TokenNotNil() predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotNull(FieldToken)) +} + +// ExpiryEQ applies the EQ predicate on the "expiry" field. +func ExpiryEQ(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldExpiry, v)) +} + +// ExpiryNEQ applies the NEQ predicate on the "expiry" field. +func ExpiryNEQ(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldExpiry, v)) +} + +// ExpiryIn applies the In predicate on the "expiry" field. +func ExpiryIn(vs ...time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldExpiry, vs...)) +} + +// ExpiryNotIn applies the NotIn predicate on the "expiry" field. 
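+// The time predicates here and below compare against time.Time values; for
+// example, devicetoken.ExpiryLT(time.Now()) is a plausible "already expired"
+// filter (a sketch only, not something this package uses itself).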
+func ExpiryNotIn(vs ...time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldExpiry, vs...)) +} + +// ExpiryGT applies the GT predicate on the "expiry" field. +func ExpiryGT(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldExpiry, v)) +} + +// ExpiryGTE applies the GTE predicate on the "expiry" field. +func ExpiryGTE(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldExpiry, v)) +} + +// ExpiryLT applies the LT predicate on the "expiry" field. +func ExpiryLT(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldExpiry, v)) +} + +// ExpiryLTE applies the LTE predicate on the "expiry" field. +func ExpiryLTE(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldExpiry, v)) +} + +// LastRequestEQ applies the EQ predicate on the "last_request" field. +func LastRequestEQ(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldLastRequest, v)) +} + +// LastRequestNEQ applies the NEQ predicate on the "last_request" field. +func LastRequestNEQ(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldLastRequest, v)) +} + +// LastRequestIn applies the In predicate on the "last_request" field. +func LastRequestIn(vs ...time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldLastRequest, vs...)) +} + +// LastRequestNotIn applies the NotIn predicate on the "last_request" field. +func LastRequestNotIn(vs ...time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldLastRequest, vs...)) +} + +// LastRequestGT applies the GT predicate on the "last_request" field. +func LastRequestGT(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldLastRequest, v)) +} + +// LastRequestGTE applies the GTE predicate on the "last_request" field. +func LastRequestGTE(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldLastRequest, v)) +} + +// LastRequestLT applies the LT predicate on the "last_request" field. +func LastRequestLT(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldLastRequest, v)) +} + +// LastRequestLTE applies the LTE predicate on the "last_request" field. +func LastRequestLTE(v time.Time) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldLastRequest, v)) +} + +// PollIntervalEQ applies the EQ predicate on the "poll_interval" field. +func PollIntervalEQ(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldPollInterval, v)) +} + +// PollIntervalNEQ applies the NEQ predicate on the "poll_interval" field. +func PollIntervalNEQ(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldPollInterval, v)) +} + +// PollIntervalIn applies the In predicate on the "poll_interval" field. +func PollIntervalIn(vs ...int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldPollInterval, vs...)) +} + +// PollIntervalNotIn applies the NotIn predicate on the "poll_interval" field. +func PollIntervalNotIn(vs ...int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldPollInterval, vs...)) +} + +// PollIntervalGT applies the GT predicate on the "poll_interval" field. 
+func PollIntervalGT(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldPollInterval, v)) +} + +// PollIntervalGTE applies the GTE predicate on the "poll_interval" field. +func PollIntervalGTE(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldPollInterval, v)) +} + +// PollIntervalLT applies the LT predicate on the "poll_interval" field. +func PollIntervalLT(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldPollInterval, v)) +} + +// PollIntervalLTE applies the LTE predicate on the "poll_interval" field. +func PollIntervalLTE(v int) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldPollInterval, v)) +} + +// CodeChallengeEQ applies the EQ predicate on the "code_challenge" field. +func CodeChallengeEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeNEQ applies the NEQ predicate on the "code_challenge" field. +func CodeChallengeNEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldCodeChallenge, v)) +} + +// CodeChallengeIn applies the In predicate on the "code_challenge" field. +func CodeChallengeIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeNotIn applies the NotIn predicate on the "code_challenge" field. +func CodeChallengeNotIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldCodeChallenge, vs...)) +} + +// CodeChallengeGT applies the GT predicate on the "code_challenge" field. +func CodeChallengeGT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldCodeChallenge, v)) +} + +// CodeChallengeGTE applies the GTE predicate on the "code_challenge" field. +func CodeChallengeGTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldCodeChallenge, v)) +} + +// CodeChallengeLT applies the LT predicate on the "code_challenge" field. +func CodeChallengeLT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldCodeChallenge, v)) +} + +// CodeChallengeLTE applies the LTE predicate on the "code_challenge" field. +func CodeChallengeLTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldCodeChallenge, v)) +} + +// CodeChallengeContains applies the Contains predicate on the "code_challenge" field. +func CodeChallengeContains(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContains(FieldCodeChallenge, v)) +} + +// CodeChallengeHasPrefix applies the HasPrefix predicate on the "code_challenge" field. +func CodeChallengeHasPrefix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasPrefix(FieldCodeChallenge, v)) +} + +// CodeChallengeHasSuffix applies the HasSuffix predicate on the "code_challenge" field. +func CodeChallengeHasSuffix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasSuffix(FieldCodeChallenge, v)) +} + +// CodeChallengeEqualFold applies the EqualFold predicate on the "code_challenge" field. +func CodeChallengeEqualFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEqualFold(FieldCodeChallenge, v)) +} + +// CodeChallengeContainsFold applies the ContainsFold predicate on the "code_challenge" field. 
+func CodeChallengeContainsFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContainsFold(FieldCodeChallenge, v)) +} + +// CodeChallengeMethodEQ applies the EQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodNEQ applies the NEQ predicate on the "code_challenge_method" field. +func CodeChallengeMethodNEQ(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNEQ(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodIn applies the In predicate on the "code_challenge_method" field. +func CodeChallengeMethodIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodNotIn applies the NotIn predicate on the "code_challenge_method" field. +func CodeChallengeMethodNotIn(vs ...string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldNotIn(FieldCodeChallengeMethod, vs...)) +} + +// CodeChallengeMethodGT applies the GT predicate on the "code_challenge_method" field. +func CodeChallengeMethodGT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodGTE applies the GTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodGTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldGTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLT applies the LT predicate on the "code_challenge_method" field. +func CodeChallengeMethodLT(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLT(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodLTE applies the LTE predicate on the "code_challenge_method" field. +func CodeChallengeMethodLTE(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldLTE(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContains applies the Contains predicate on the "code_challenge_method" field. +func CodeChallengeMethodContains(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContains(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasPrefix applies the HasPrefix predicate on the "code_challenge_method" field. +func CodeChallengeMethodHasPrefix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasPrefix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodHasSuffix applies the HasSuffix predicate on the "code_challenge_method" field. +func CodeChallengeMethodHasSuffix(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldHasSuffix(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodEqualFold applies the EqualFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodEqualFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldEqualFold(FieldCodeChallengeMethod, v)) +} + +// CodeChallengeMethodContainsFold applies the ContainsFold predicate on the "code_challenge_method" field. +func CodeChallengeMethodContainsFold(v string) predicate.DeviceToken { + return predicate.DeviceToken(sql.FieldContainsFold(FieldCodeChallengeMethod, v)) +} + +// And groups predicates with the AND operator between them. 
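+// Note that the predicates passed to Where are already ANDed together, so
+// And is mostly useful when nested inside Or; a sketch (status strings are
+// illustrative):
+//
+//	devicetoken.Or(
+//		devicetoken.StatusEQ("pending"),
+//		devicetoken.And(devicetoken.StatusEQ("complete"), devicetoken.TokenNotNil()),
+//	)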
+func And(predicates ...predicate.DeviceToken) predicate.DeviceToken { + return predicate.DeviceToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DeviceToken) predicate.DeviceToken { + return predicate.DeviceToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.DeviceToken) predicate.DeviceToken { + return predicate.DeviceToken(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_create.go new file mode 100644 index 00000000..31e90fa2 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_create.go @@ -0,0 +1,308 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicetoken" +) + +// DeviceTokenCreate is the builder for creating a DeviceToken entity. +type DeviceTokenCreate struct { + config + mutation *DeviceTokenMutation + hooks []Hook +} + +// SetDeviceCode sets the "device_code" field. +func (dtc *DeviceTokenCreate) SetDeviceCode(s string) *DeviceTokenCreate { + dtc.mutation.SetDeviceCode(s) + return dtc +} + +// SetStatus sets the "status" field. +func (dtc *DeviceTokenCreate) SetStatus(s string) *DeviceTokenCreate { + dtc.mutation.SetStatus(s) + return dtc +} + +// SetToken sets the "token" field. +func (dtc *DeviceTokenCreate) SetToken(b []byte) *DeviceTokenCreate { + dtc.mutation.SetToken(b) + return dtc +} + +// SetExpiry sets the "expiry" field. +func (dtc *DeviceTokenCreate) SetExpiry(t time.Time) *DeviceTokenCreate { + dtc.mutation.SetExpiry(t) + return dtc +} + +// SetLastRequest sets the "last_request" field. +func (dtc *DeviceTokenCreate) SetLastRequest(t time.Time) *DeviceTokenCreate { + dtc.mutation.SetLastRequest(t) + return dtc +} + +// SetPollInterval sets the "poll_interval" field. +func (dtc *DeviceTokenCreate) SetPollInterval(i int) *DeviceTokenCreate { + dtc.mutation.SetPollInterval(i) + return dtc +} + +// SetCodeChallenge sets the "code_challenge" field. +func (dtc *DeviceTokenCreate) SetCodeChallenge(s string) *DeviceTokenCreate { + dtc.mutation.SetCodeChallenge(s) + return dtc +} + +// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil. +func (dtc *DeviceTokenCreate) SetNillableCodeChallenge(s *string) *DeviceTokenCreate { + if s != nil { + dtc.SetCodeChallenge(*s) + } + return dtc +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (dtc *DeviceTokenCreate) SetCodeChallengeMethod(s string) *DeviceTokenCreate { + dtc.mutation.SetCodeChallengeMethod(s) + return dtc +} + +// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil. +func (dtc *DeviceTokenCreate) SetNillableCodeChallengeMethod(s *string) *DeviceTokenCreate { + if s != nil { + dtc.SetCodeChallengeMethod(*s) + } + return dtc +} + +// Mutation returns the DeviceTokenMutation object of the builder. +func (dtc *DeviceTokenCreate) Mutation() *DeviceTokenMutation { + return dtc.mutation +} + +// Save creates the DeviceToken in the database. 
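+// A minimal creation sketch covering the required fields enforced by check
+// below (client is an assumed *db.Client; values are illustrative):
+//
+//	dt, err := client.DeviceToken.Create().
+//		SetDeviceCode("code").
+//		SetStatus("pending").
+//		SetExpiry(time.Now().Add(5 * time.Minute)).
+//		SetLastRequest(time.Now()).
+//		SetPollInterval(5).
+//		Save(ctx)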
+func (dtc *DeviceTokenCreate) Save(ctx context.Context) (*DeviceToken, error) { + dtc.defaults() + return withHooks(ctx, dtc.sqlSave, dtc.mutation, dtc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (dtc *DeviceTokenCreate) SaveX(ctx context.Context) *DeviceToken { + v, err := dtc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dtc *DeviceTokenCreate) Exec(ctx context.Context) error { + _, err := dtc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtc *DeviceTokenCreate) ExecX(ctx context.Context) { + if err := dtc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dtc *DeviceTokenCreate) defaults() { + if _, ok := dtc.mutation.CodeChallenge(); !ok { + v := devicetoken.DefaultCodeChallenge + dtc.mutation.SetCodeChallenge(v) + } + if _, ok := dtc.mutation.CodeChallengeMethod(); !ok { + v := devicetoken.DefaultCodeChallengeMethod + dtc.mutation.SetCodeChallengeMethod(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtc *DeviceTokenCreate) check() error { + if _, ok := dtc.mutation.DeviceCode(); !ok { + return &ValidationError{Name: "device_code", err: errors.New(`db: missing required field "DeviceToken.device_code"`)} + } + if v, ok := dtc.mutation.DeviceCode(); ok { + if err := devicetoken.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceToken.device_code": %w`, err)} + } + } + if _, ok := dtc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`db: missing required field "DeviceToken.status"`)} + } + if v, ok := dtc.mutation.Status(); ok { + if err := devicetoken.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "DeviceToken.status": %w`, err)} + } + } + if _, ok := dtc.mutation.Expiry(); !ok { + return &ValidationError{Name: "expiry", err: errors.New(`db: missing required field "DeviceToken.expiry"`)} + } + if _, ok := dtc.mutation.LastRequest(); !ok { + return &ValidationError{Name: "last_request", err: errors.New(`db: missing required field "DeviceToken.last_request"`)} + } + if _, ok := dtc.mutation.PollInterval(); !ok { + return &ValidationError{Name: "poll_interval", err: errors.New(`db: missing required field "DeviceToken.poll_interval"`)} + } + if _, ok := dtc.mutation.CodeChallenge(); !ok { + return &ValidationError{Name: "code_challenge", err: errors.New(`db: missing required field "DeviceToken.code_challenge"`)} + } + if _, ok := dtc.mutation.CodeChallengeMethod(); !ok { + return &ValidationError{Name: "code_challenge_method", err: errors.New(`db: missing required field "DeviceToken.code_challenge_method"`)} + } + return nil +} + +func (dtc *DeviceTokenCreate) sqlSave(ctx context.Context) (*DeviceToken, error) { + if err := dtc.check(); err != nil { + return nil, err + } + _node, _spec := dtc.createSpec() + if err := sqlgraph.CreateNode(ctx, dtc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + dtc.mutation.id = &_node.ID + dtc.mutation.done = true + return _node, nil +} + +func (dtc *DeviceTokenCreate) createSpec() (*DeviceToken, *sqlgraph.CreateSpec) { + var ( + _node = &DeviceToken{config: dtc.config} + _spec = 
sqlgraph.NewCreateSpec(devicetoken.Table, sqlgraph.NewFieldSpec(devicetoken.FieldID, field.TypeInt)) + ) + if value, ok := dtc.mutation.DeviceCode(); ok { + _spec.SetField(devicetoken.FieldDeviceCode, field.TypeString, value) + _node.DeviceCode = value + } + if value, ok := dtc.mutation.Status(); ok { + _spec.SetField(devicetoken.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := dtc.mutation.Token(); ok { + _spec.SetField(devicetoken.FieldToken, field.TypeBytes, value) + _node.Token = &value + } + if value, ok := dtc.mutation.Expiry(); ok { + _spec.SetField(devicetoken.FieldExpiry, field.TypeTime, value) + _node.Expiry = value + } + if value, ok := dtc.mutation.LastRequest(); ok { + _spec.SetField(devicetoken.FieldLastRequest, field.TypeTime, value) + _node.LastRequest = value + } + if value, ok := dtc.mutation.PollInterval(); ok { + _spec.SetField(devicetoken.FieldPollInterval, field.TypeInt, value) + _node.PollInterval = value + } + if value, ok := dtc.mutation.CodeChallenge(); ok { + _spec.SetField(devicetoken.FieldCodeChallenge, field.TypeString, value) + _node.CodeChallenge = value + } + if value, ok := dtc.mutation.CodeChallengeMethod(); ok { + _spec.SetField(devicetoken.FieldCodeChallengeMethod, field.TypeString, value) + _node.CodeChallengeMethod = value + } + return _node, _spec +} + +// DeviceTokenCreateBulk is the builder for creating many DeviceToken entities in bulk. +type DeviceTokenCreateBulk struct { + config + builders []*DeviceTokenCreate +} + +// Save creates the DeviceToken entities in the database. +func (dtcb *DeviceTokenCreateBulk) Save(ctx context.Context) ([]*DeviceToken, error) { + specs := make([]*sqlgraph.CreateSpec, len(dtcb.builders)) + nodes := make([]*DeviceToken, len(dtcb.builders)) + mutators := make([]Mutator, len(dtcb.builders)) + for i := range dtcb.builders { + func(i int, root context.Context) { + builder := dtcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DeviceTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dtcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dtcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dtcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dtcb *DeviceTokenCreateBulk) SaveX(ctx context.Context) []*DeviceToken { + v, err := dtcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
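+// A bulk-create sketch (client and builders are assumed/illustrative):
+//
+//	err := client.DeviceToken.CreateBulk(builders...).Exec(ctx)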
+func (dtcb *DeviceTokenCreateBulk) Exec(ctx context.Context) error {
+	_, err := dtcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dtcb *DeviceTokenCreateBulk) ExecX(ctx context.Context) {
+	if err := dtcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_delete.go
new file mode 100644
index 00000000..9632450b
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// DeviceTokenDelete is the builder for deleting a DeviceToken entity.
+type DeviceTokenDelete struct {
+	config
+	hooks    []Hook
+	mutation *DeviceTokenMutation
+}
+
+// Where appends a list of predicates to the DeviceTokenDelete builder.
+func (dtd *DeviceTokenDelete) Where(ps ...predicate.DeviceToken) *DeviceTokenDelete {
+	dtd.mutation.Where(ps...)
+	return dtd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (dtd *DeviceTokenDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, dtd.sqlExec, dtd.mutation, dtd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dtd *DeviceTokenDelete) ExecX(ctx context.Context) int {
+	n, err := dtd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (dtd *DeviceTokenDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(devicetoken.Table, sqlgraph.NewFieldSpec(devicetoken.FieldID, field.TypeInt))
+	if ps := dtd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, dtd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	dtd.mutation.done = true
+	return affected, err
+}
+
+// DeviceTokenDeleteOne is the builder for deleting a single DeviceToken entity.
+type DeviceTokenDeleteOne struct {
+	dtd *DeviceTokenDelete
+}
+
+// Where appends a list of predicates to the DeviceTokenDeleteOne builder.
+func (dtdo *DeviceTokenDeleteOne) Where(ps ...predicate.DeviceToken) *DeviceTokenDeleteOne {
+	dtdo.dtd.mutation.Where(ps...)
+	return dtdo
+}
+
+// Exec executes the deletion query.
+func (dtdo *DeviceTokenDeleteOne) Exec(ctx context.Context) error {
+	n, err := dtdo.dtd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{devicetoken.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dtdo *DeviceTokenDeleteOne) ExecX(ctx context.Context) {
+	if err := dtdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_query.go
new file mode 100644
index 00000000..866e977c
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_query.go
@@ -0,0 +1,526 @@
+// Code generated by ent, DO NOT EDIT.
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/devicetoken" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// DeviceTokenQuery is the builder for querying DeviceToken entities. +type DeviceTokenQuery struct { + config + ctx *QueryContext + order []devicetoken.OrderOption + inters []Interceptor + predicates []predicate.DeviceToken + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DeviceTokenQuery builder. +func (dtq *DeviceTokenQuery) Where(ps ...predicate.DeviceToken) *DeviceTokenQuery { + dtq.predicates = append(dtq.predicates, ps...) + return dtq +} + +// Limit the number of records to be returned by this query. +func (dtq *DeviceTokenQuery) Limit(limit int) *DeviceTokenQuery { + dtq.ctx.Limit = &limit + return dtq +} + +// Offset to start from. +func (dtq *DeviceTokenQuery) Offset(offset int) *DeviceTokenQuery { + dtq.ctx.Offset = &offset + return dtq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dtq *DeviceTokenQuery) Unique(unique bool) *DeviceTokenQuery { + dtq.ctx.Unique = &unique + return dtq +} + +// Order specifies how the records should be ordered. +func (dtq *DeviceTokenQuery) Order(o ...devicetoken.OrderOption) *DeviceTokenQuery { + dtq.order = append(dtq.order, o...) + return dtq +} + +// First returns the first DeviceToken entity from the query. +// Returns a *NotFoundError when no DeviceToken was found. +func (dtq *DeviceTokenQuery) First(ctx context.Context) (*DeviceToken, error) { + nodes, err := dtq.Limit(1).All(setContextOp(ctx, dtq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{devicetoken.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dtq *DeviceTokenQuery) FirstX(ctx context.Context) *DeviceToken { + node, err := dtq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DeviceToken ID from the query. +// Returns a *NotFoundError when no DeviceToken ID was found. +func (dtq *DeviceTokenQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dtq.Limit(1).IDs(setContextOp(ctx, dtq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{devicetoken.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dtq *DeviceTokenQuery) FirstIDX(ctx context.Context) int { + id, err := dtq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DeviceToken entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DeviceToken entity is found. +// Returns a *NotFoundError when no DeviceToken entities are found. 
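+//
+// A typical lookup sketch (client is an assumed *db.Client):
+//
+//	dt, err := client.DeviceToken.Query().
+//		Where(devicetoken.DeviceCodeEQ(code)).
+//		Only(ctx)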
+func (dtq *DeviceTokenQuery) Only(ctx context.Context) (*DeviceToken, error) { + nodes, err := dtq.Limit(2).All(setContextOp(ctx, dtq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{devicetoken.Label} + default: + return nil, &NotSingularError{devicetoken.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dtq *DeviceTokenQuery) OnlyX(ctx context.Context) *DeviceToken { + node, err := dtq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DeviceToken ID in the query. +// Returns a *NotSingularError when more than one DeviceToken ID is found. +// Returns a *NotFoundError when no entities are found. +func (dtq *DeviceTokenQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dtq.Limit(2).IDs(setContextOp(ctx, dtq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{devicetoken.Label} + default: + err = &NotSingularError{devicetoken.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dtq *DeviceTokenQuery) OnlyIDX(ctx context.Context) int { + id, err := dtq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DeviceTokens. +func (dtq *DeviceTokenQuery) All(ctx context.Context) ([]*DeviceToken, error) { + ctx = setContextOp(ctx, dtq.ctx, "All") + if err := dtq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*DeviceToken, *DeviceTokenQuery]() + return withInterceptors[[]*DeviceToken](ctx, dtq, qr, dtq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (dtq *DeviceTokenQuery) AllX(ctx context.Context) []*DeviceToken { + nodes, err := dtq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DeviceToken IDs. +func (dtq *DeviceTokenQuery) IDs(ctx context.Context) (ids []int, err error) { + if dtq.ctx.Unique == nil && dtq.path != nil { + dtq.Unique(true) + } + ctx = setContextOp(ctx, dtq.ctx, "IDs") + if err = dtq.Select(devicetoken.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dtq *DeviceTokenQuery) IDsX(ctx context.Context) []int { + ids, err := dtq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dtq *DeviceTokenQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dtq.ctx, "Count") + if err := dtq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, dtq, querierCount[*DeviceTokenQuery](), dtq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (dtq *DeviceTokenQuery) CountX(ctx context.Context) int { + count, err := dtq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dtq *DeviceTokenQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, dtq.ctx, "Exist") + switch _, err := dtq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
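+// In request paths the non-panicking Exist is usually preferable; a sketch
+// (client is an assumed *db.Client):
+//
+//	ok, err := client.DeviceToken.Query().
+//		Where(devicetoken.StatusEQ("pending")).
+//		Exist(ctx)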
+func (dtq *DeviceTokenQuery) ExistX(ctx context.Context) bool {
+	exist, err := dtq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the DeviceTokenQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (dtq *DeviceTokenQuery) Clone() *DeviceTokenQuery {
+	if dtq == nil {
+		return nil
+	}
+	return &DeviceTokenQuery{
+		config:     dtq.config,
+		ctx:        dtq.ctx.Clone(),
+		order:      append([]devicetoken.OrderOption{}, dtq.order...),
+		inters:     append([]Interceptor{}, dtq.inters...),
+		predicates: append([]predicate.DeviceToken{}, dtq.predicates...),
+		// clone intermediate query.
+		sql:  dtq.sql.Clone(),
+		path: dtq.path,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		DeviceCode string `json:"device_code,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.DeviceToken.Query().
+//		GroupBy(devicetoken.FieldDeviceCode).
+//		Aggregate(db.Count()).
+//		Scan(ctx, &v)
+func (dtq *DeviceTokenQuery) GroupBy(field string, fields ...string) *DeviceTokenGroupBy {
+	dtq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &DeviceTokenGroupBy{build: dtq}
+	grbuild.flds = &dtq.ctx.Fields
+	grbuild.label = devicetoken.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		DeviceCode string `json:"device_code,omitempty"`
+//	}
+//
+//	client.DeviceToken.Query().
+//		Select(devicetoken.FieldDeviceCode).
+//		Scan(ctx, &v)
+func (dtq *DeviceTokenQuery) Select(fields ...string) *DeviceTokenSelect {
+	dtq.ctx.Fields = append(dtq.ctx.Fields, fields...)
+	sbuild := &DeviceTokenSelect{DeviceTokenQuery: dtq}
+	sbuild.label = devicetoken.Label
+	sbuild.flds, sbuild.scan = &dtq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a DeviceTokenSelect configured with the given aggregations.
+func (dtq *DeviceTokenQuery) Aggregate(fns ...AggregateFunc) *DeviceTokenSelect {
+	return dtq.Select().Aggregate(fns...)
+} + +func (dtq *DeviceTokenQuery) prepareQuery(ctx context.Context) error { + for _, inter := range dtq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dtq); err != nil { + return err + } + } + } + for _, f := range dtq.ctx.Fields { + if !devicetoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if dtq.path != nil { + prev, err := dtq.path(ctx) + if err != nil { + return err + } + dtq.sql = prev + } + return nil +} + +func (dtq *DeviceTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DeviceToken, error) { + var ( + nodes = []*DeviceToken{} + _spec = dtq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DeviceToken).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &DeviceToken{config: dtq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dtq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (dtq *DeviceTokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dtq.querySpec() + _spec.Node.Columns = dtq.ctx.Fields + if len(dtq.ctx.Fields) > 0 { + _spec.Unique = dtq.ctx.Unique != nil && *dtq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dtq.driver, _spec) +} + +func (dtq *DeviceTokenQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(devicetoken.Table, devicetoken.Columns, sqlgraph.NewFieldSpec(devicetoken.FieldID, field.TypeInt)) + _spec.From = dtq.sql + if unique := dtq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if dtq.path != nil { + _spec.Unique = true + } + if fields := dtq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, devicetoken.FieldID) + for i := range fields { + if fields[i] != devicetoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dtq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dtq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := dtq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := dtq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dtq *DeviceTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dtq.driver.Dialect()) + t1 := builder.Table(devicetoken.Table) + columns := dtq.ctx.Fields + if len(columns) == 0 { + columns = devicetoken.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dtq.sql != nil { + selector = dtq.sql + selector.Select(selector.Columns(columns...)...) + } + if dtq.ctx.Unique != nil && *dtq.ctx.Unique { + selector.Distinct() + } + for _, p := range dtq.predicates { + p(selector) + } + for _, p := range dtq.order { + p(selector) + } + if offset := dtq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
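+		// math.MaxInt32 stands in for "no limit" here, since several SQL
+		// dialects reject an OFFSET clause without an accompanying LIMIT.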
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dtq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DeviceTokenGroupBy is the group-by builder for DeviceToken entities. +type DeviceTokenGroupBy struct { + selector + build *DeviceTokenQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dtgb *DeviceTokenGroupBy) Aggregate(fns ...AggregateFunc) *DeviceTokenGroupBy { + dtgb.fns = append(dtgb.fns, fns...) + return dtgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dtgb *DeviceTokenGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dtgb.build.ctx, "GroupBy") + if err := dtgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DeviceTokenQuery, *DeviceTokenGroupBy](ctx, dtgb.build, dtgb, dtgb.build.inters, v) +} + +func (dtgb *DeviceTokenGroupBy) sqlScan(ctx context.Context, root *DeviceTokenQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dtgb.fns)) + for _, fn := range dtgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dtgb.flds)+len(dtgb.fns)) + for _, f := range *dtgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dtgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dtgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DeviceTokenSelect is the builder for selecting fields of DeviceToken entities. +type DeviceTokenSelect struct { + *DeviceTokenQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (dts *DeviceTokenSelect) Aggregate(fns ...AggregateFunc) *DeviceTokenSelect { + dts.fns = append(dts.fns, fns...) + return dts +} + +// Scan applies the selector query and scans the result into the given value. +func (dts *DeviceTokenSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dts.ctx, "Select") + if err := dts.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DeviceTokenQuery, *DeviceTokenSelect](ctx, dts.DeviceTokenQuery, dts, dts.inters, v) +} + +func (dts *DeviceTokenSelect) sqlScan(ctx context.Context, root *DeviceTokenQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(dts.fns)) + for _, fn := range dts.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*dts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dts.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_update.go new file mode 100644 index 00000000..bed8cc47 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/devicetoken_update.go @@ -0,0 +1,426 @@ +// Code generated by ent, DO NOT EDIT. 
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// DeviceTokenUpdate is the builder for updating DeviceToken entities.
+type DeviceTokenUpdate struct {
+	config
+	hooks    []Hook
+	mutation *DeviceTokenMutation
+}
+
+// Where appends a list of predicates to the DeviceTokenUpdate builder.
+func (dtu *DeviceTokenUpdate) Where(ps ...predicate.DeviceToken) *DeviceTokenUpdate {
+	dtu.mutation.Where(ps...)
+	return dtu
+}
+
+// SetDeviceCode sets the "device_code" field.
+func (dtu *DeviceTokenUpdate) SetDeviceCode(s string) *DeviceTokenUpdate {
+	dtu.mutation.SetDeviceCode(s)
+	return dtu
+}
+
+// SetStatus sets the "status" field.
+func (dtu *DeviceTokenUpdate) SetStatus(s string) *DeviceTokenUpdate {
+	dtu.mutation.SetStatus(s)
+	return dtu
+}
+
+// SetToken sets the "token" field.
+func (dtu *DeviceTokenUpdate) SetToken(b []byte) *DeviceTokenUpdate {
+	dtu.mutation.SetToken(b)
+	return dtu
+}
+
+// ClearToken clears the value of the "token" field.
+func (dtu *DeviceTokenUpdate) ClearToken() *DeviceTokenUpdate {
+	dtu.mutation.ClearToken()
+	return dtu
+}
+
+// SetExpiry sets the "expiry" field.
+func (dtu *DeviceTokenUpdate) SetExpiry(t time.Time) *DeviceTokenUpdate {
+	dtu.mutation.SetExpiry(t)
+	return dtu
+}
+
+// SetLastRequest sets the "last_request" field.
+func (dtu *DeviceTokenUpdate) SetLastRequest(t time.Time) *DeviceTokenUpdate {
+	dtu.mutation.SetLastRequest(t)
+	return dtu
+}
+
+// SetPollInterval sets the "poll_interval" field.
+func (dtu *DeviceTokenUpdate) SetPollInterval(i int) *DeviceTokenUpdate {
+	dtu.mutation.ResetPollInterval()
+	dtu.mutation.SetPollInterval(i)
+	return dtu
+}
+
+// AddPollInterval adds i to the "poll_interval" field.
+func (dtu *DeviceTokenUpdate) AddPollInterval(i int) *DeviceTokenUpdate {
+	dtu.mutation.AddPollInterval(i)
+	return dtu
+}
+
+// SetCodeChallenge sets the "code_challenge" field.
+func (dtu *DeviceTokenUpdate) SetCodeChallenge(s string) *DeviceTokenUpdate {
+	dtu.mutation.SetCodeChallenge(s)
+	return dtu
+}
+
+// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil.
+func (dtu *DeviceTokenUpdate) SetNillableCodeChallenge(s *string) *DeviceTokenUpdate {
+	if s != nil {
+		dtu.SetCodeChallenge(*s)
+	}
+	return dtu
+}
+
+// SetCodeChallengeMethod sets the "code_challenge_method" field.
+func (dtu *DeviceTokenUpdate) SetCodeChallengeMethod(s string) *DeviceTokenUpdate {
+	dtu.mutation.SetCodeChallengeMethod(s)
+	return dtu
+}
+
+// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil.
+func (dtu *DeviceTokenUpdate) SetNillableCodeChallengeMethod(s *string) *DeviceTokenUpdate {
+	if s != nil {
+		dtu.SetCodeChallengeMethod(*s)
+	}
+	return dtu
+}
+
+// Mutation returns the DeviceTokenMutation object of the builder.
+func (dtu *DeviceTokenUpdate) Mutation() *DeviceTokenMutation {
+	return dtu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (dtu *DeviceTokenUpdate) Save(ctx context.Context) (int, error) {
+	return withHooks(ctx, dtu.sqlSave, dtu.mutation, dtu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
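+//
+// Illustrative sketch (assumes a generated client named "client" and that
+// DeviceCodeEQ is the generated predicate for the "device_code" field):
+//
+//	n := client.DeviceToken.Update().
+//		Where(devicetoken.DeviceCodeEQ("code")).
+//		SetStatus("complete").
+//		SaveX(ctx)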
+func (dtu *DeviceTokenUpdate) SaveX(ctx context.Context) int { + affected, err := dtu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (dtu *DeviceTokenUpdate) Exec(ctx context.Context) error { + _, err := dtu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dtu *DeviceTokenUpdate) ExecX(ctx context.Context) { + if err := dtu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtu *DeviceTokenUpdate) check() error { + if v, ok := dtu.mutation.DeviceCode(); ok { + if err := devicetoken.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceToken.device_code": %w`, err)} + } + } + if v, ok := dtu.mutation.Status(); ok { + if err := devicetoken.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "DeviceToken.status": %w`, err)} + } + } + return nil +} + +func (dtu *DeviceTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := dtu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(devicetoken.Table, devicetoken.Columns, sqlgraph.NewFieldSpec(devicetoken.FieldID, field.TypeInt)) + if ps := dtu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtu.mutation.DeviceCode(); ok { + _spec.SetField(devicetoken.FieldDeviceCode, field.TypeString, value) + } + if value, ok := dtu.mutation.Status(); ok { + _spec.SetField(devicetoken.FieldStatus, field.TypeString, value) + } + if value, ok := dtu.mutation.Token(); ok { + _spec.SetField(devicetoken.FieldToken, field.TypeBytes, value) + } + if dtu.mutation.TokenCleared() { + _spec.ClearField(devicetoken.FieldToken, field.TypeBytes) + } + if value, ok := dtu.mutation.Expiry(); ok { + _spec.SetField(devicetoken.FieldExpiry, field.TypeTime, value) + } + if value, ok := dtu.mutation.LastRequest(); ok { + _spec.SetField(devicetoken.FieldLastRequest, field.TypeTime, value) + } + if value, ok := dtu.mutation.PollInterval(); ok { + _spec.SetField(devicetoken.FieldPollInterval, field.TypeInt, value) + } + if value, ok := dtu.mutation.AddedPollInterval(); ok { + _spec.AddField(devicetoken.FieldPollInterval, field.TypeInt, value) + } + if value, ok := dtu.mutation.CodeChallenge(); ok { + _spec.SetField(devicetoken.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := dtu.mutation.CodeChallengeMethod(); ok { + _spec.SetField(devicetoken.FieldCodeChallengeMethod, field.TypeString, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, dtu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{devicetoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + dtu.mutation.done = true + return n, nil +} + +// DeviceTokenUpdateOne is the builder for updating a single DeviceToken entity. +type DeviceTokenUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DeviceTokenMutation +} + +// SetDeviceCode sets the "device_code" field. +func (dtuo *DeviceTokenUpdateOne) SetDeviceCode(s string) *DeviceTokenUpdateOne { + dtuo.mutation.SetDeviceCode(s) + return dtuo +} + +// SetStatus sets the "status" field. 
+func (dtuo *DeviceTokenUpdateOne) SetStatus(s string) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetStatus(s)
+	return dtuo
+}
+
+// SetToken sets the "token" field.
+func (dtuo *DeviceTokenUpdateOne) SetToken(b []byte) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetToken(b)
+	return dtuo
+}
+
+// ClearToken clears the value of the "token" field.
+func (dtuo *DeviceTokenUpdateOne) ClearToken() *DeviceTokenUpdateOne {
+	dtuo.mutation.ClearToken()
+	return dtuo
+}
+
+// SetExpiry sets the "expiry" field.
+func (dtuo *DeviceTokenUpdateOne) SetExpiry(t time.Time) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetExpiry(t)
+	return dtuo
+}
+
+// SetLastRequest sets the "last_request" field.
+func (dtuo *DeviceTokenUpdateOne) SetLastRequest(t time.Time) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetLastRequest(t)
+	return dtuo
+}
+
+// SetPollInterval sets the "poll_interval" field.
+func (dtuo *DeviceTokenUpdateOne) SetPollInterval(i int) *DeviceTokenUpdateOne {
+	dtuo.mutation.ResetPollInterval()
+	dtuo.mutation.SetPollInterval(i)
+	return dtuo
+}
+
+// AddPollInterval adds i to the "poll_interval" field.
+func (dtuo *DeviceTokenUpdateOne) AddPollInterval(i int) *DeviceTokenUpdateOne {
+	dtuo.mutation.AddPollInterval(i)
+	return dtuo
+}
+
+// SetCodeChallenge sets the "code_challenge" field.
+func (dtuo *DeviceTokenUpdateOne) SetCodeChallenge(s string) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetCodeChallenge(s)
+	return dtuo
+}
+
+// SetNillableCodeChallenge sets the "code_challenge" field if the given value is not nil.
+func (dtuo *DeviceTokenUpdateOne) SetNillableCodeChallenge(s *string) *DeviceTokenUpdateOne {
+	if s != nil {
+		dtuo.SetCodeChallenge(*s)
+	}
+	return dtuo
+}
+
+// SetCodeChallengeMethod sets the "code_challenge_method" field.
+func (dtuo *DeviceTokenUpdateOne) SetCodeChallengeMethod(s string) *DeviceTokenUpdateOne {
+	dtuo.mutation.SetCodeChallengeMethod(s)
+	return dtuo
+}
+
+// SetNillableCodeChallengeMethod sets the "code_challenge_method" field if the given value is not nil.
+func (dtuo *DeviceTokenUpdateOne) SetNillableCodeChallengeMethod(s *string) *DeviceTokenUpdateOne {
+	if s != nil {
+		dtuo.SetCodeChallengeMethod(*s)
+	}
+	return dtuo
+}
+
+// Mutation returns the DeviceTokenMutation object of the builder.
+func (dtuo *DeviceTokenUpdateOne) Mutation() *DeviceTokenMutation {
+	return dtuo.mutation
+}
+
+// Where appends a list of predicates to the DeviceTokenUpdateOne builder.
+func (dtuo *DeviceTokenUpdateOne) Where(ps ...predicate.DeviceToken) *DeviceTokenUpdateOne {
+	dtuo.mutation.Where(ps...)
+	return dtuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (dtuo *DeviceTokenUpdateOne) Select(field string, fields ...string) *DeviceTokenUpdateOne {
+	dtuo.fields = append([]string{field}, fields...)
+	return dtuo
+}
+
+// Save executes the query and returns the updated DeviceToken entity.
+func (dtuo *DeviceTokenUpdateOne) Save(ctx context.Context) (*DeviceToken, error) {
+	return withHooks(ctx, dtuo.sqlSave, dtuo.mutation, dtuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dtuo *DeviceTokenUpdateOne) SaveX(ctx context.Context) *DeviceToken {
+	node, err := dtuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (dtuo *DeviceTokenUpdateOne) Exec(ctx context.Context) error {
+	_, err := dtuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dtuo *DeviceTokenUpdateOne) ExecX(ctx context.Context) { + if err := dtuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dtuo *DeviceTokenUpdateOne) check() error { + if v, ok := dtuo.mutation.DeviceCode(); ok { + if err := devicetoken.DeviceCodeValidator(v); err != nil { + return &ValidationError{Name: "device_code", err: fmt.Errorf(`db: validator failed for field "DeviceToken.device_code": %w`, err)} + } + } + if v, ok := dtuo.mutation.Status(); ok { + if err := devicetoken.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`db: validator failed for field "DeviceToken.status": %w`, err)} + } + } + return nil +} + +func (dtuo *DeviceTokenUpdateOne) sqlSave(ctx context.Context) (_node *DeviceToken, err error) { + if err := dtuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(devicetoken.Table, devicetoken.Columns, sqlgraph.NewFieldSpec(devicetoken.FieldID, field.TypeInt)) + id, ok := dtuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "DeviceToken.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dtuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, devicetoken.FieldID) + for _, f := range fields { + if !devicetoken.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != devicetoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dtuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dtuo.mutation.DeviceCode(); ok { + _spec.SetField(devicetoken.FieldDeviceCode, field.TypeString, value) + } + if value, ok := dtuo.mutation.Status(); ok { + _spec.SetField(devicetoken.FieldStatus, field.TypeString, value) + } + if value, ok := dtuo.mutation.Token(); ok { + _spec.SetField(devicetoken.FieldToken, field.TypeBytes, value) + } + if dtuo.mutation.TokenCleared() { + _spec.ClearField(devicetoken.FieldToken, field.TypeBytes) + } + if value, ok := dtuo.mutation.Expiry(); ok { + _spec.SetField(devicetoken.FieldExpiry, field.TypeTime, value) + } + if value, ok := dtuo.mutation.LastRequest(); ok { + _spec.SetField(devicetoken.FieldLastRequest, field.TypeTime, value) + } + if value, ok := dtuo.mutation.PollInterval(); ok { + _spec.SetField(devicetoken.FieldPollInterval, field.TypeInt, value) + } + if value, ok := dtuo.mutation.AddedPollInterval(); ok { + _spec.AddField(devicetoken.FieldPollInterval, field.TypeInt, value) + } + if value, ok := dtuo.mutation.CodeChallenge(); ok { + _spec.SetField(devicetoken.FieldCodeChallenge, field.TypeString, value) + } + if value, ok := dtuo.mutation.CodeChallengeMethod(); ok { + _spec.SetField(devicetoken.FieldCodeChallengeMethod, field.TypeString, value) + } + _node = &DeviceToken{config: dtuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dtuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{devicetoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + dtuo.mutation.done = true + return _node, nil +} diff --git 
a/vendor/github.com/dexidp/dex/storage/ent/db/ent.go b/vendor/github.com/dexidp/dex/storage/ent/db/ent.go
new file mode 100644
index 00000000..da7a8016
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/ent.go
@@ -0,0 +1,626 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/dexidp/dex/storage/ent/db/authcode"
+	"github.com/dexidp/dex/storage/ent/db/authrequest"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/devicerequest"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/offlinesession"
+	"github.com/dexidp/dex/storage/ent/db/password"
+	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
+)
+
+// ent aliases to avoid import conflicts in user's code.
+type (
+	Op            = ent.Op
+	Hook          = ent.Hook
+	Value         = ent.Value
+	Query         = ent.Query
+	QueryContext  = ent.QueryContext
+	Querier       = ent.Querier
+	QuerierFunc   = ent.QuerierFunc
+	Interceptor   = ent.Interceptor
+	InterceptFunc = ent.InterceptFunc
+	Traverser     = ent.Traverser
+	TraverseFunc  = ent.TraverseFunc
+	Policy        = ent.Policy
+	Mutator       = ent.Mutator
+	Mutation      = ent.Mutation
+	MutateFunc    = ent.MutateFunc
+)
+
+type clientCtxKey struct{}
+
+// FromContext returns a Client stored inside a context, or nil if there isn't one.
+func FromContext(ctx context.Context) *Client {
+	c, _ := ctx.Value(clientCtxKey{}).(*Client)
+	return c
+}
+
+// NewContext returns a new context with the given Client attached.
+func NewContext(parent context.Context, c *Client) context.Context {
+	return context.WithValue(parent, clientCtxKey{}, c)
+}
+
+type txCtxKey struct{}
+
+// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
+func TxFromContext(ctx context.Context) *Tx {
+	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
+	return tx
+}
+
+// NewTxContext returns a new context with the given Tx attached.
+func NewTxContext(parent context.Context, tx *Tx) context.Context {
+	return context.WithValue(parent, txCtxKey{}, tx)
+}
+
+// OrderFunc applies an ordering on the sql selector.
+// Deprecated: Use Asc/Desc functions or the package builders instead.
+type OrderFunc func(*sql.Selector)
+
+var (
+	initCheck   sync.Once
+	columnCheck sql.ColumnCheck
+)
+
+// checkColumn checks if the column exists in the given table.
+func checkColumn(table, column string) error {
+	initCheck.Do(func() {
+		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
+			authcode.Table:       authcode.ValidColumn,
+			authrequest.Table:    authrequest.ValidColumn,
+			connector.Table:      connector.ValidColumn,
+			devicerequest.Table:  devicerequest.ValidColumn,
+			devicetoken.Table:    devicetoken.ValidColumn,
+			keys.Table:           keys.ValidColumn,
+			oauth2client.Table:   oauth2client.ValidColumn,
+			offlinesession.Table: offlinesession.ValidColumn,
+			password.Table:       password.ValidColumn,
+			refreshtoken.Table:   refreshtoken.ValidColumn,
+		})
+	})
+	return columnCheck(table, column)
+}
+
+// Asc applies the given fields in ASC order.
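+//
+// Illustrative sketch (assumes a generated client named "client"; any
+// generated entity works, Keys is used here):
+//
+//	ks, err := client.Keys.Query().
+//		Order(db.Asc(keys.FieldNextRotation)).
+//		All(ctx)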
+func Asc(fields ...string) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		for _, f := range fields {
+			if err := checkColumn(s.TableName(), f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)})
+			}
+			s.OrderBy(sql.Asc(s.C(f)))
+		}
+	}
+}
+
+// Desc applies the given fields in DESC order.
+func Desc(fields ...string) func(*sql.Selector) {
+	return func(s *sql.Selector) {
+		for _, f := range fields {
+			if err := checkColumn(s.TableName(), f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)})
+			}
+			s.OrderBy(sql.Desc(s.C(f)))
+		}
+	}
+}
+
+// AggregateFunc applies an aggregation step on the group-by traversal/selector.
+type AggregateFunc func(*sql.Selector) string
+
+// As is a pseudo aggregation function for renaming other functions with custom names. For example:
+//
+//	GroupBy(field1, field2).
+//	Aggregate(db.As(db.Sum(field1), "sum_field1"), db.As(db.Sum(field2), "sum_field2")).
+//	Scan(ctx, &v)
+func As(fn AggregateFunc, end string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		return sql.As(fn(s), end)
+	}
+}
+
+// Count applies the "count" aggregation function on each group.
+func Count() AggregateFunc {
+	return func(s *sql.Selector) string {
+		return sql.Count("*")
+	}
+}
+
+// Max applies the "max" aggregation function on the given field of each group.
+func Max(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
+			return ""
+		}
+		return sql.Max(s.C(field))
+	}
+}
+
+// Mean applies the "mean" aggregation function on the given field of each group.
+func Mean(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
+			return ""
+		}
+		return sql.Avg(s.C(field))
+	}
+}
+
+// Min applies the "min" aggregation function on the given field of each group.
+func Min(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
+			return ""
+		}
+		return sql.Min(s.C(field))
+	}
+}
+
+// Sum applies the "sum" aggregation function on the given field of each group.
+func Sum(field string) AggregateFunc {
+	return func(s *sql.Selector) string {
+		if err := checkColumn(s.TableName(), field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
+			return ""
+		}
+		return sql.Sum(s.C(field))
+	}
+}
+
+// ValidationError returns when validating a field or edge fails.
+type ValidationError struct {
+	Name string // Field or edge name.
+	err  error
+}
+
+// Error implements the error interface.
+func (e *ValidationError) Error() string {
+	return e.err.Error()
+}
+
+// Unwrap implements the errors.Wrapper interface.
+func (e *ValidationError) Unwrap() error {
+	return e.err
+}
+
+// IsValidationError returns a boolean indicating whether the error is a validation error.
+func IsValidationError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ValidationError
+	return errors.As(err, &e)
+}
+
+// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
+type NotFoundError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotFoundError) Error() string {
+	return "db: " + e.label + " not found"
+}
+
+// IsNotFound returns a boolean indicating whether the error is a not found error.
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotFoundError
+	return errors.As(err, &e)
+}
+
+// MaskNotFound masks a not found error.
+func MaskNotFound(err error) error {
+	if IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
+type NotSingularError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotSingularError) Error() string {
+	return "db: " + e.label + " not singular"
+}
+
+// IsNotSingular returns a boolean indicating whether the error is a not singular error.
+func IsNotSingular(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotSingularError
+	return errors.As(err, &e)
+}
+
+// NotLoadedError returns when trying to get a node that was not loaded by the query.
+type NotLoadedError struct {
+	edge string
+}
+
+// Error implements the error interface.
+func (e *NotLoadedError) Error() string {
+	return "db: " + e.edge + " edge was not loaded"
+}
+
+// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
+func IsNotLoaded(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotLoadedError
+	return errors.As(err, &e)
+}
+
+// ConstraintError returns when trying to create/update one or more entities and
+// one or more of their constraints failed. For example, violation of edge or
+// field uniqueness.
+type ConstraintError struct {
+	msg  string
+	wrap error
+}
+
+// Error implements the error interface.
+func (e ConstraintError) Error() string {
+	return "db: constraint failed: " + e.msg
+}
+
+// Unwrap implements the errors.Wrapper interface.
+func (e *ConstraintError) Unwrap() error {
+	return e.wrap
+}
+
+// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
+func IsConstraintError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ConstraintError
+	return errors.As(err, &e)
+}
+
+// selector embedded by the different Select/GroupBy builders.
+type selector struct {
+	label string
+	flds  *[]string
+	fns   []AggregateFunc
+	scan  func(context.Context, any) error
+}
+
+// ScanX is like Scan, but panics if an error occurs.
+func (s *selector) ScanX(ctx context.Context, v any) {
+	if err := s.scan(ctx, v); err != nil {
+		panic(err)
+	}
+}
+
+// Strings returns list of strings from a selector. It is only allowed when selecting one field.
+func (s *selector) Strings(ctx context.Context) ([]string, error) {
+	if len(*s.flds) > 1 {
+		return nil, errors.New("db: Strings is not achievable when selecting more than 1 field")
+	}
+	var v []string
+	if err := s.scan(ctx, &v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// StringsX is like Strings, but panics if an error occurs.
+func (s *selector) StringsX(ctx context.Context) []string {
+	v, err := s.Strings(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// String returns a single string from a selector. It is only allowed when selecting one field.
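+//
+// For example (illustrative; assumes a generated client named "client"):
+//
+//	status, err := client.DeviceToken.Query().
+//		Select(devicetoken.FieldStatus).
+//		String(ctx)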
+func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (s *selector) BoolsX(ctx context.Context) []bool {
+	v, err := s.Bools(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Bool returns a single bool from a selector. It is only allowed when selecting one field.
+func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
+	var v []bool
+	if v, err = s.Bools(ctx); err != nil {
+		return
+	}
+	switch len(v) {
+	case 1:
+		return v[0], nil
+	case 0:
+		err = &NotFoundError{s.label}
+	default:
+		err = fmt.Errorf("db: Bools returned %d results when one was expected", len(v))
+	}
+	return
+}
+
+// BoolX is like Bool, but panics if an error occurs.
+func (s *selector) BoolX(ctx context.Context) bool {
+	v, err := s.Bool(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// withHooks invokes the builder operation with the given hooks, if any.
+func withHooks[V Value, M any, PM interface {
+	*M
+	Mutation
+}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
+	if len(hooks) == 0 {
+		return exec(ctx)
+	}
+	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+		mutationT, ok := any(m).(PM)
+		if !ok {
+			return nil, fmt.Errorf("unexpected mutation type %T", m)
+		}
+		// Set the mutation to the builder.
+		*mutation = *mutationT
+		return exec(ctx)
+	})
+	for i := len(hooks) - 1; i >= 0; i-- {
+		if hooks[i] == nil {
+			return value, fmt.Errorf("db: uninitialized hook (forgotten import db/runtime?)")
+		}
+		mut = hooks[i](mut)
+	}
+	v, err := mut.Mutate(ctx, mutation)
+	if err != nil {
+		return value, err
+	}
+	nv, ok := v.(V)
+	if !ok {
+		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
+	}
+	return nv, nil
+}
+
+// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
+	if ent.QueryFromContext(ctx) == nil {
+		qc.Op = op
+		ctx = ent.NewQueryContext(ctx, qc)
+	}
+	return ctx
+}
+
+func querierAll[V Value, Q interface {
+	sqlAll(context.Context, ...queryHook) (V, error)
+}]() Querier {
+	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+		query, ok := q.(Q)
+		if !ok {
+			return nil, fmt.Errorf("unexpected query type %T", q)
+		}
+		return query.sqlAll(ctx)
+	})
+}
+
+func querierCount[Q interface {
+	sqlCount(context.Context) (int, error)
+}]() Querier {
+	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+		query, ok := q.(Q)
+		if !ok {
+			return nil, fmt.Errorf("unexpected query type %T", q)
+		}
+		return query.sqlCount(ctx)
+	})
+}
+
+func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
+	for i := len(inters) - 1; i >= 0; i-- {
+		qr = inters[i].Intercept(qr)
+	}
+	rv, err := qr.Query(ctx, q)
+	if err != nil {
+		return v, err
+	}
+	vt, ok := rv.(V)
+	if !ok {
+		return v, fmt.Errorf("unexpected type %T returned from %T.
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys.go new file mode 100644 index 00000000..ff84655e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys.go @@ -0,0 +1,148 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/keys" + jose "gopkg.in/square/go-jose.v2" +) + +// Keys is the model entity for the Keys schema. +type Keys struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // VerificationKeys holds the value of the "verification_keys" field. + VerificationKeys []storage.VerificationKey `json:"verification_keys,omitempty"` + // SigningKey holds the value of the "signing_key" field. + SigningKey jose.JSONWebKey `json:"signing_key,omitempty"` + // SigningKeyPub holds the value of the "signing_key_pub" field. + SigningKeyPub jose.JSONWebKey `json:"signing_key_pub,omitempty"` + // NextRotation holds the value of the "next_rotation" field. + NextRotation time.Time `json:"next_rotation,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Keys) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case keys.FieldVerificationKeys, keys.FieldSigningKey, keys.FieldSigningKeyPub: + values[i] = new([]byte) + case keys.FieldID: + values[i] = new(sql.NullString) + case keys.FieldNextRotation: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Keys fields. 
+func (k *Keys) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case keys.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + k.ID = value.String + } + case keys.FieldVerificationKeys: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field verification_keys", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &k.VerificationKeys); err != nil { + return fmt.Errorf("unmarshal field verification_keys: %w", err) + } + } + case keys.FieldSigningKey: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field signing_key", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &k.SigningKey); err != nil { + return fmt.Errorf("unmarshal field signing_key: %w", err) + } + } + case keys.FieldSigningKeyPub: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field signing_key_pub", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &k.SigningKeyPub); err != nil { + return fmt.Errorf("unmarshal field signing_key_pub: %w", err) + } + } + case keys.FieldNextRotation: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field next_rotation", values[i]) + } else if value.Valid { + k.NextRotation = value.Time + } + default: + k.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Keys. +// This includes values selected through modifiers, order, etc. +func (k *Keys) Value(name string) (ent.Value, error) { + return k.selectValues.Get(name) +} + +// Update returns a builder for updating this Keys. +// Note that you need to call Keys.Unwrap() before calling this method if this Keys +// was returned from a transaction, and the transaction was committed or rolled back. +func (k *Keys) Update() *KeysUpdateOne { + return NewKeysClient(k.config).UpdateOne(k) +} + +// Unwrap unwraps the Keys entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (k *Keys) Unwrap() *Keys { + _tx, ok := k.config.driver.(*txDriver) + if !ok { + panic("db: Keys is not a transactional entity") + } + k.config.driver = _tx.drv + return k +} + +// String implements the fmt.Stringer. +func (k *Keys) String() string { + var builder strings.Builder + builder.WriteString("Keys(") + builder.WriteString(fmt.Sprintf("id=%v, ", k.ID)) + builder.WriteString("verification_keys=") + builder.WriteString(fmt.Sprintf("%v", k.VerificationKeys)) + builder.WriteString(", ") + builder.WriteString("signing_key=") + builder.WriteString(fmt.Sprintf("%v", k.SigningKey)) + builder.WriteString(", ") + builder.WriteString("signing_key_pub=") + builder.WriteString(fmt.Sprintf("%v", k.SigningKeyPub)) + builder.WriteString(", ") + builder.WriteString("next_rotation=") + builder.WriteString(k.NextRotation.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// KeysSlice is a parsable slice of Keys. 
+type KeysSlice []*Keys diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/keys/BUILD new file mode 100644 index 00000000..e7404e4c --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "keys", + srcs = [ + "keys.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/keys", + importpath = "github.com/dexidp/dex/storage/ent/db/keys", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys/keys.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys/keys.go new file mode 100644 index 00000000..a00f39b1 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys/keys.go @@ -0,0 +1,61 @@ +// Code generated by ent, DO NOT EDIT. + +package keys + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the keys type in the database. + Label = "keys" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldVerificationKeys holds the string denoting the verification_keys field in the database. + FieldVerificationKeys = "verification_keys" + // FieldSigningKey holds the string denoting the signing_key field in the database. + FieldSigningKey = "signing_key" + // FieldSigningKeyPub holds the string denoting the signing_key_pub field in the database. + FieldSigningKeyPub = "signing_key_pub" + // FieldNextRotation holds the string denoting the next_rotation field in the database. + FieldNextRotation = "next_rotation" + // Table holds the table name of the keys in the database. + Table = "keys" +) + +// Columns holds all SQL columns for keys fields. +var Columns = []string{ + FieldID, + FieldVerificationKeys, + FieldSigningKey, + FieldSigningKeyPub, + FieldNextRotation, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the Keys queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByNextRotation orders the results by the next_rotation field. +func ByNextRotation(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNextRotation, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys/where.go new file mode 100644 index 00000000..9b31c744 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys/where.go @@ -0,0 +1,142 @@ +// Code generated by ent, DO NOT EDIT. + +package keys + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Keys { + return predicate.Keys(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id string) predicate.Keys { + return predicate.Keys(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Keys { + return predicate.Keys(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Keys { + return predicate.Keys(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Keys { + return predicate.Keys(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Keys { + return predicate.Keys(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Keys { + return predicate.Keys(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Keys { + return predicate.Keys(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Keys { + return predicate.Keys(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Keys { + return predicate.Keys(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Keys { + return predicate.Keys(sql.FieldContainsFold(FieldID, id)) +} + +// NextRotation applies equality check predicate on the "next_rotation" field. It's identical to NextRotationEQ. +func NextRotation(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldEQ(FieldNextRotation, v)) +} + +// NextRotationEQ applies the EQ predicate on the "next_rotation" field. +func NextRotationEQ(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldEQ(FieldNextRotation, v)) +} + +// NextRotationNEQ applies the NEQ predicate on the "next_rotation" field. +func NextRotationNEQ(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldNEQ(FieldNextRotation, v)) +} + +// NextRotationIn applies the In predicate on the "next_rotation" field. +func NextRotationIn(vs ...time.Time) predicate.Keys { + return predicate.Keys(sql.FieldIn(FieldNextRotation, vs...)) +} + +// NextRotationNotIn applies the NotIn predicate on the "next_rotation" field. +func NextRotationNotIn(vs ...time.Time) predicate.Keys { + return predicate.Keys(sql.FieldNotIn(FieldNextRotation, vs...)) +} + +// NextRotationGT applies the GT predicate on the "next_rotation" field. +func NextRotationGT(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldGT(FieldNextRotation, v)) +} + +// NextRotationGTE applies the GTE predicate on the "next_rotation" field. +func NextRotationGTE(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldGTE(FieldNextRotation, v)) +} + +// NextRotationLT applies the LT predicate on the "next_rotation" field. +func NextRotationLT(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldLT(FieldNextRotation, v)) +} + +// NextRotationLTE applies the LTE predicate on the "next_rotation" field. +func NextRotationLTE(v time.Time) predicate.Keys { + return predicate.Keys(sql.FieldLTE(FieldNextRotation, v)) +} + +// And groups predicates with the AND operator between them. 
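+//
+// For example (illustrative; "from" and "until" are assumed time.Time values):
+//
+//	keys.And(keys.NextRotationGT(from), keys.NextRotationLT(until))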
+func And(predicates ...predicate.Keys) predicate.Keys { + return predicate.Keys(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Keys) predicate.Keys { + return predicate.Keys(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Keys) predicate.Keys { + return predicate.Keys(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys_create.go new file mode 100644 index 00000000..07943af9 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys_create.go @@ -0,0 +1,237 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/keys" + jose "gopkg.in/square/go-jose.v2" +) + +// KeysCreate is the builder for creating a Keys entity. +type KeysCreate struct { + config + mutation *KeysMutation + hooks []Hook +} + +// SetVerificationKeys sets the "verification_keys" field. +func (kc *KeysCreate) SetVerificationKeys(sk []storage.VerificationKey) *KeysCreate { + kc.mutation.SetVerificationKeys(sk) + return kc +} + +// SetSigningKey sets the "signing_key" field. +func (kc *KeysCreate) SetSigningKey(jwk jose.JSONWebKey) *KeysCreate { + kc.mutation.SetSigningKey(jwk) + return kc +} + +// SetSigningKeyPub sets the "signing_key_pub" field. +func (kc *KeysCreate) SetSigningKeyPub(jwk jose.JSONWebKey) *KeysCreate { + kc.mutation.SetSigningKeyPub(jwk) + return kc +} + +// SetNextRotation sets the "next_rotation" field. +func (kc *KeysCreate) SetNextRotation(t time.Time) *KeysCreate { + kc.mutation.SetNextRotation(t) + return kc +} + +// SetID sets the "id" field. +func (kc *KeysCreate) SetID(s string) *KeysCreate { + kc.mutation.SetID(s) + return kc +} + +// Mutation returns the KeysMutation object of the builder. +func (kc *KeysCreate) Mutation() *KeysMutation { + return kc.mutation +} + +// Save creates the Keys in the database. +func (kc *KeysCreate) Save(ctx context.Context) (*Keys, error) { + return withHooks(ctx, kc.sqlSave, kc.mutation, kc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (kc *KeysCreate) SaveX(ctx context.Context) *Keys { + v, err := kc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (kc *KeysCreate) Exec(ctx context.Context) error { + _, err := kc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (kc *KeysCreate) ExecX(ctx context.Context) { + if err := kc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (kc *KeysCreate) check() error { + if _, ok := kc.mutation.VerificationKeys(); !ok { + return &ValidationError{Name: "verification_keys", err: errors.New(`db: missing required field "Keys.verification_keys"`)} + } + if _, ok := kc.mutation.SigningKey(); !ok { + return &ValidationError{Name: "signing_key", err: errors.New(`db: missing required field "Keys.signing_key"`)} + } + if _, ok := kc.mutation.SigningKeyPub(); !ok { + return &ValidationError{Name: "signing_key_pub", err: errors.New(`db: missing required field "Keys.signing_key_pub"`)} + } + if _, ok := kc.mutation.NextRotation(); !ok { + return &ValidationError{Name: "next_rotation", err: errors.New(`db: missing required field "Keys.next_rotation"`)} + } + if v, ok := kc.mutation.ID(); ok { + if err := keys.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "Keys.id": %w`, err)} + } + } + return nil +} + +func (kc *KeysCreate) sqlSave(ctx context.Context) (*Keys, error) { + if err := kc.check(); err != nil { + return nil, err + } + _node, _spec := kc.createSpec() + if err := sqlgraph.CreateNode(ctx, kc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Keys.ID type: %T", _spec.ID.Value) + } + } + kc.mutation.id = &_node.ID + kc.mutation.done = true + return _node, nil +} + +func (kc *KeysCreate) createSpec() (*Keys, *sqlgraph.CreateSpec) { + var ( + _node = &Keys{config: kc.config} + _spec = sqlgraph.NewCreateSpec(keys.Table, sqlgraph.NewFieldSpec(keys.FieldID, field.TypeString)) + ) + if id, ok := kc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := kc.mutation.VerificationKeys(); ok { + _spec.SetField(keys.FieldVerificationKeys, field.TypeJSON, value) + _node.VerificationKeys = value + } + if value, ok := kc.mutation.SigningKey(); ok { + _spec.SetField(keys.FieldSigningKey, field.TypeJSON, value) + _node.SigningKey = value + } + if value, ok := kc.mutation.SigningKeyPub(); ok { + _spec.SetField(keys.FieldSigningKeyPub, field.TypeJSON, value) + _node.SigningKeyPub = value + } + if value, ok := kc.mutation.NextRotation(); ok { + _spec.SetField(keys.FieldNextRotation, field.TypeTime, value) + _node.NextRotation = value + } + return _node, _spec +} + +// KeysCreateBulk is the builder for creating many Keys entities in bulk. +type KeysCreateBulk struct { + config + builders []*KeysCreate +} + +// Save creates the Keys entities in the database. 
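+//
+// Illustrative sketch (assumes the generated client's CreateBulk helper,
+// which builds a KeysCreateBulk from a list of KeysCreate builders):
+//
+//	ks, err := client.Keys.CreateBulk(builders...).Save(ctx)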
+func (kcb *KeysCreateBulk) Save(ctx context.Context) ([]*Keys, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(kcb.builders))
+	nodes := make([]*Keys, len(kcb.builders))
+	mutators := make([]Mutator, len(kcb.builders))
+	for i := range kcb.builders {
+		func(i int, root context.Context) {
+			builder := kcb.builders[i]
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*KeysMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, kcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, kcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, kcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (kcb *KeysCreateBulk) SaveX(ctx context.Context) []*Keys {
+	v, err := kcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (kcb *KeysCreateBulk) Exec(ctx context.Context) error {
+	_, err := kcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (kcb *KeysCreateBulk) ExecX(ctx context.Context) {
+	if err := kcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys_delete.go
new file mode 100644
index 00000000..7f661194
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// KeysDelete is the builder for deleting a Keys entity.
+type KeysDelete struct {
+	config
+	hooks    []Hook
+	mutation *KeysMutation
+}
+
+// Where appends a list of predicates to the KeysDelete builder.
+func (kd *KeysDelete) Where(ps ...predicate.Keys) *KeysDelete {
+	kd.mutation.Where(ps...)
+	return kd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (kd *KeysDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, kd.sqlExec, kd.mutation, kd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
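+// For example (illustrative only; assumes a configured *Client and ctx, and a
+// hypothetical key id):
+//
+//	n := client.Keys.Delete().Where(keys.IDEQ("jwks")).ExecX(ctx)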
+func (kd *KeysDelete) ExecX(ctx context.Context) int {
+	n, err := kd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (kd *KeysDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(keys.Table, sqlgraph.NewFieldSpec(keys.FieldID, field.TypeString))
+	if ps := kd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, kd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	kd.mutation.done = true
+	return affected, err
+}
+
+// KeysDeleteOne is the builder for deleting a single Keys entity.
+type KeysDeleteOne struct {
+	kd *KeysDelete
+}
+
+// Where appends a list of predicates to the KeysDeleteOne builder.
+func (kdo *KeysDeleteOne) Where(ps ...predicate.Keys) *KeysDeleteOne {
+	kdo.kd.mutation.Where(ps...)
+	return kdo
+}
+
+// Exec executes the deletion query.
+func (kdo *KeysDeleteOne) Exec(ctx context.Context) error {
+	n, err := kdo.kd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{keys.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (kdo *KeysDeleteOne) ExecX(ctx context.Context) {
+	if err := kdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys_query.go
new file mode 100644
index 00000000..3be00ff4
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys_query.go
@@ -0,0 +1,526 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// KeysQuery is the builder for querying Keys entities.
+type KeysQuery struct {
+	config
+	ctx        *QueryContext
+	order      []keys.OrderOption
+	inters     []Interceptor
+	predicates []predicate.Keys
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the KeysQuery builder.
+func (kq *KeysQuery) Where(ps ...predicate.Keys) *KeysQuery {
+	kq.predicates = append(kq.predicates, ps...)
+	return kq
+}
+
+// Limit the number of records to be returned by this query.
+func (kq *KeysQuery) Limit(limit int) *KeysQuery {
+	kq.ctx.Limit = &limit
+	return kq
+}
+
+// Offset to start from.
+func (kq *KeysQuery) Offset(offset int) *KeysQuery {
+	kq.ctx.Offset = &offset
+	return kq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (kq *KeysQuery) Unique(unique bool) *KeysQuery {
+	kq.ctx.Unique = &unique
+	return kq
+}
+
+// Order specifies how the records should be ordered.
+func (kq *KeysQuery) Order(o ...keys.OrderOption) *KeysQuery {
+	kq.order = append(kq.order, o...)
+	return kq
+}
+
+// First returns the first Keys entity from the query.
+// Returns a *NotFoundError when no Keys was found.
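+// For example (illustrative, assuming a configured *Client and ctx):
+//
+//	k, err := client.Keys.Query().First(ctx)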
+func (kq *KeysQuery) First(ctx context.Context) (*Keys, error) { + nodes, err := kq.Limit(1).All(setContextOp(ctx, kq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{keys.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (kq *KeysQuery) FirstX(ctx context.Context) *Keys { + node, err := kq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Keys ID from the query. +// Returns a *NotFoundError when no Keys ID was found. +func (kq *KeysQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = kq.Limit(1).IDs(setContextOp(ctx, kq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{keys.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (kq *KeysQuery) FirstIDX(ctx context.Context) string { + id, err := kq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Keys entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Keys entity is found. +// Returns a *NotFoundError when no Keys entities are found. +func (kq *KeysQuery) Only(ctx context.Context) (*Keys, error) { + nodes, err := kq.Limit(2).All(setContextOp(ctx, kq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{keys.Label} + default: + return nil, &NotSingularError{keys.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (kq *KeysQuery) OnlyX(ctx context.Context) *Keys { + node, err := kq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Keys ID in the query. +// Returns a *NotSingularError when more than one Keys ID is found. +// Returns a *NotFoundError when no entities are found. +func (kq *KeysQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = kq.Limit(2).IDs(setContextOp(ctx, kq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{keys.Label} + default: + err = &NotSingularError{keys.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (kq *KeysQuery) OnlyIDX(ctx context.Context) string { + id, err := kq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of KeysSlice. +func (kq *KeysQuery) All(ctx context.Context) ([]*Keys, error) { + ctx = setContextOp(ctx, kq.ctx, "All") + if err := kq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Keys, *KeysQuery]() + return withInterceptors[[]*Keys](ctx, kq, qr, kq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (kq *KeysQuery) AllX(ctx context.Context) []*Keys { + nodes, err := kq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Keys IDs. +func (kq *KeysQuery) IDs(ctx context.Context) (ids []string, err error) { + if kq.ctx.Unique == nil && kq.path != nil { + kq.Unique(true) + } + ctx = setContextOp(ctx, kq.ctx, "IDs") + if err = kq.Select(keys.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (kq *KeysQuery) IDsX(ctx context.Context) []string {
+	ids, err := kq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (kq *KeysQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, kq.ctx, "Count")
+	if err := kq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, kq, querierCount[*KeysQuery](), kq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (kq *KeysQuery) CountX(ctx context.Context) int {
+	count, err := kq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (kq *KeysQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, kq.ctx, "Exist")
+	switch _, err := kq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("db: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (kq *KeysQuery) ExistX(ctx context.Context) bool {
+	exist, err := kq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the KeysQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (kq *KeysQuery) Clone() *KeysQuery {
+	if kq == nil {
+		return nil
+	}
+	return &KeysQuery{
+		config:     kq.config,
+		ctx:        kq.ctx.Clone(),
+		order:      append([]keys.OrderOption{}, kq.order...),
+		inters:     append([]Interceptor{}, kq.inters...),
+		predicates: append([]predicate.Keys{}, kq.predicates...),
+		// clone intermediate query.
+		sql:  kq.sql.Clone(),
+		path: kq.path,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		VerificationKeys []storage.VerificationKey `json:"verification_keys,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.Keys.Query().
+//		GroupBy(keys.FieldVerificationKeys).
+//		Aggregate(db.Count()).
+//		Scan(ctx, &v)
+func (kq *KeysQuery) GroupBy(field string, fields ...string) *KeysGroupBy {
+	kq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &KeysGroupBy{build: kq}
+	grbuild.flds = &kq.ctx.Fields
+	grbuild.label = keys.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		VerificationKeys []storage.VerificationKey `json:"verification_keys,omitempty"`
+//	}
+//
+//	client.Keys.Query().
+//		Select(keys.FieldVerificationKeys).
+//		Scan(ctx, &v)
+func (kq *KeysQuery) Select(fields ...string) *KeysSelect {
+	kq.ctx.Fields = append(kq.ctx.Fields, fields...)
+	sbuild := &KeysSelect{KeysQuery: kq}
+	sbuild.label = keys.Label
+	sbuild.flds, sbuild.scan = &kq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a KeysSelect configured with the given aggregations.
+func (kq *KeysQuery) Aggregate(fns ...AggregateFunc) *KeysSelect {
+	return kq.Select().Aggregate(fns...)
+} + +func (kq *KeysQuery) prepareQuery(ctx context.Context) error { + for _, inter := range kq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, kq); err != nil { + return err + } + } + } + for _, f := range kq.ctx.Fields { + if !keys.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if kq.path != nil { + prev, err := kq.path(ctx) + if err != nil { + return err + } + kq.sql = prev + } + return nil +} + +func (kq *KeysQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Keys, error) { + var ( + nodes = []*Keys{} + _spec = kq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Keys).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Keys{config: kq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, kq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (kq *KeysQuery) sqlCount(ctx context.Context) (int, error) { + _spec := kq.querySpec() + _spec.Node.Columns = kq.ctx.Fields + if len(kq.ctx.Fields) > 0 { + _spec.Unique = kq.ctx.Unique != nil && *kq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, kq.driver, _spec) +} + +func (kq *KeysQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(keys.Table, keys.Columns, sqlgraph.NewFieldSpec(keys.FieldID, field.TypeString)) + _spec.From = kq.sql + if unique := kq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if kq.path != nil { + _spec.Unique = true + } + if fields := kq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, keys.FieldID) + for i := range fields { + if fields[i] != keys.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := kq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := kq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := kq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := kq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (kq *KeysQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(kq.driver.Dialect()) + t1 := builder.Table(keys.Table) + columns := kq.ctx.Fields + if len(columns) == 0 { + columns = keys.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if kq.sql != nil { + selector = kq.sql + selector.Select(selector.Columns(columns...)...) + } + if kq.ctx.Unique != nil && *kq.ctx.Unique { + selector.Distinct() + } + for _, p := range kq.predicates { + p(selector) + } + for _, p := range kq.order { + p(selector) + } + if offset := kq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := kq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// KeysGroupBy is the group-by builder for Keys entities. 
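+// It is obtained by calling KeysQuery.GroupBy; see the example on that method.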
+type KeysGroupBy struct { + selector + build *KeysQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (kgb *KeysGroupBy) Aggregate(fns ...AggregateFunc) *KeysGroupBy { + kgb.fns = append(kgb.fns, fns...) + return kgb +} + +// Scan applies the selector query and scans the result into the given value. +func (kgb *KeysGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, kgb.build.ctx, "GroupBy") + if err := kgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*KeysQuery, *KeysGroupBy](ctx, kgb.build, kgb, kgb.build.inters, v) +} + +func (kgb *KeysGroupBy) sqlScan(ctx context.Context, root *KeysQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(kgb.fns)) + for _, fn := range kgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*kgb.flds)+len(kgb.fns)) + for _, f := range *kgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*kgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := kgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// KeysSelect is the builder for selecting fields of Keys entities. +type KeysSelect struct { + *KeysQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ks *KeysSelect) Aggregate(fns ...AggregateFunc) *KeysSelect { + ks.fns = append(ks.fns, fns...) + return ks +} + +// Scan applies the selector query and scans the result into the given value. +func (ks *KeysSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ks.ctx, "Select") + if err := ks.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*KeysQuery, *KeysSelect](ctx, ks.KeysQuery, ks, ks.inters, v) +} + +func (ks *KeysSelect) sqlScan(ctx context.Context, root *KeysQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ks.fns)) + for _, fn := range ks.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ks.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ks.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/keys_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/keys_update.go new file mode 100644 index 00000000..7a059207 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/keys_update.go @@ -0,0 +1,273 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage" + "github.com/dexidp/dex/storage/ent/db/keys" + "github.com/dexidp/dex/storage/ent/db/predicate" + jose "gopkg.in/square/go-jose.v2" +) + +// KeysUpdate is the builder for updating Keys entities. 
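+// For example (illustrative only; assumes a configured *Client and ctx, and a
+// hypothetical key id and rotation interval):
+//
+//	n, err := client.Keys.Update().
+//		Where(keys.IDEQ("jwks")).
+//		SetNextRotation(time.Now().Add(6 * time.Hour)).
+//		Save(ctx)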
+type KeysUpdate struct {
+	config
+	hooks    []Hook
+	mutation *KeysMutation
+}
+
+// Where appends a list of predicates to the KeysUpdate builder.
+func (ku *KeysUpdate) Where(ps ...predicate.Keys) *KeysUpdate {
+	ku.mutation.Where(ps...)
+	return ku
+}
+
+// SetVerificationKeys sets the "verification_keys" field.
+func (ku *KeysUpdate) SetVerificationKeys(sk []storage.VerificationKey) *KeysUpdate {
+	ku.mutation.SetVerificationKeys(sk)
+	return ku
+}
+
+// AppendVerificationKeys appends sk to the "verification_keys" field.
+func (ku *KeysUpdate) AppendVerificationKeys(sk []storage.VerificationKey) *KeysUpdate {
+	ku.mutation.AppendVerificationKeys(sk)
+	return ku
+}
+
+// SetSigningKey sets the "signing_key" field.
+func (ku *KeysUpdate) SetSigningKey(jwk jose.JSONWebKey) *KeysUpdate {
+	ku.mutation.SetSigningKey(jwk)
+	return ku
+}
+
+// SetSigningKeyPub sets the "signing_key_pub" field.
+func (ku *KeysUpdate) SetSigningKeyPub(jwk jose.JSONWebKey) *KeysUpdate {
+	ku.mutation.SetSigningKeyPub(jwk)
+	return ku
+}
+
+// SetNextRotation sets the "next_rotation" field.
+func (ku *KeysUpdate) SetNextRotation(t time.Time) *KeysUpdate {
+	ku.mutation.SetNextRotation(t)
+	return ku
+}
+
+// Mutation returns the KeysMutation object of the builder.
+func (ku *KeysUpdate) Mutation() *KeysMutation {
+	return ku.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (ku *KeysUpdate) Save(ctx context.Context) (int, error) {
+	return withHooks(ctx, ku.sqlSave, ku.mutation, ku.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ku *KeysUpdate) SaveX(ctx context.Context) int {
+	affected, err := ku.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (ku *KeysUpdate) Exec(ctx context.Context) error {
+	_, err := ku.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ku *KeysUpdate) ExecX(ctx context.Context) {
+	if err := ku.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (ku *KeysUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := sqlgraph.NewUpdateSpec(keys.Table, keys.Columns, sqlgraph.NewFieldSpec(keys.FieldID, field.TypeString))
+	if ps := ku.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := ku.mutation.VerificationKeys(); ok {
+		_spec.SetField(keys.FieldVerificationKeys, field.TypeJSON, value)
+	}
+	if value, ok := ku.mutation.AppendedVerificationKeys(); ok {
+		_spec.AddModifier(func(u *sql.UpdateBuilder) {
+			sqljson.Append(u, keys.FieldVerificationKeys, value)
+		})
+	}
+	if value, ok := ku.mutation.SigningKey(); ok {
+		_spec.SetField(keys.FieldSigningKey, field.TypeJSON, value)
+	}
+	if value, ok := ku.mutation.SigningKeyPub(); ok {
+		_spec.SetField(keys.FieldSigningKeyPub, field.TypeJSON, value)
+	}
+	if value, ok := ku.mutation.NextRotation(); ok {
+		_spec.SetField(keys.FieldNextRotation, field.TypeTime, value)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, ku.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{keys.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	ku.mutation.done = true
+	return n, nil
+}
+
+// KeysUpdateOne is the builder for updating a single Keys entity.
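+// For example (illustrative only; assumes a configured *Client and ctx, and a
+// hypothetical key id):
+//
+//	k, err := client.Keys.UpdateOneID("jwks").
+//		SetNextRotation(time.Now().Add(6 * time.Hour)).
+//		Save(ctx)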
+type KeysUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *KeysMutation
+}
+
+// SetVerificationKeys sets the "verification_keys" field.
+func (kuo *KeysUpdateOne) SetVerificationKeys(sk []storage.VerificationKey) *KeysUpdateOne {
+	kuo.mutation.SetVerificationKeys(sk)
+	return kuo
+}
+
+// AppendVerificationKeys appends sk to the "verification_keys" field.
+func (kuo *KeysUpdateOne) AppendVerificationKeys(sk []storage.VerificationKey) *KeysUpdateOne {
+	kuo.mutation.AppendVerificationKeys(sk)
+	return kuo
+}
+
+// SetSigningKey sets the "signing_key" field.
+func (kuo *KeysUpdateOne) SetSigningKey(jwk jose.JSONWebKey) *KeysUpdateOne {
+	kuo.mutation.SetSigningKey(jwk)
+	return kuo
+}
+
+// SetSigningKeyPub sets the "signing_key_pub" field.
+func (kuo *KeysUpdateOne) SetSigningKeyPub(jwk jose.JSONWebKey) *KeysUpdateOne {
+	kuo.mutation.SetSigningKeyPub(jwk)
+	return kuo
+}
+
+// SetNextRotation sets the "next_rotation" field.
+func (kuo *KeysUpdateOne) SetNextRotation(t time.Time) *KeysUpdateOne {
+	kuo.mutation.SetNextRotation(t)
+	return kuo
+}
+
+// Mutation returns the KeysMutation object of the builder.
+func (kuo *KeysUpdateOne) Mutation() *KeysMutation {
+	return kuo.mutation
+}
+
+// Where appends a list of predicates to the KeysUpdateOne builder.
+func (kuo *KeysUpdateOne) Where(ps ...predicate.Keys) *KeysUpdateOne {
+	kuo.mutation.Where(ps...)
+	return kuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (kuo *KeysUpdateOne) Select(field string, fields ...string) *KeysUpdateOne {
+	kuo.fields = append([]string{field}, fields...)
+	return kuo
+}
+
+// Save executes the query and returns the updated Keys entity.
+func (kuo *KeysUpdateOne) Save(ctx context.Context) (*Keys, error) {
+	return withHooks(ctx, kuo.sqlSave, kuo.mutation, kuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (kuo *KeysUpdateOne) SaveX(ctx context.Context) *Keys {
+	node, err := kuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (kuo *KeysUpdateOne) Exec(ctx context.Context) error {
+	_, err := kuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (kuo *KeysUpdateOne) ExecX(ctx context.Context) {
+	if err := kuo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (kuo *KeysUpdateOne) sqlSave(ctx context.Context) (_node *Keys, err error) {
+	_spec := sqlgraph.NewUpdateSpec(keys.Table, keys.Columns, sqlgraph.NewFieldSpec(keys.FieldID, field.TypeString))
+	id, ok := kuo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "Keys.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := kuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, keys.FieldID)
+		for _, f := range fields {
+			if !keys.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != keys.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := kuo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := kuo.mutation.VerificationKeys(); ok {
+		_spec.SetField(keys.FieldVerificationKeys, field.TypeJSON, value)
+	}
+	if value, ok := kuo.mutation.AppendedVerificationKeys(); ok {
+		_spec.AddModifier(func(u *sql.UpdateBuilder) {
+			sqljson.Append(u, keys.FieldVerificationKeys, value)
+		})
+	}
+	if value, ok := kuo.mutation.SigningKey(); ok {
+		_spec.SetField(keys.FieldSigningKey, field.TypeJSON, value)
+	}
+	if value, ok := kuo.mutation.SigningKeyPub(); ok {
+		_spec.SetField(keys.FieldSigningKeyPub, field.TypeJSON, value)
+	}
+	if value, ok := kuo.mutation.NextRotation(); ok {
+		_spec.SetField(keys.FieldNextRotation, field.TypeTime, value)
+	}
+	_node = &Keys{config: kuo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, kuo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{keys.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	kuo.mutation.done = true
+	return _node, nil
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/migrate/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/BUILD
new file mode 100644
index 00000000..abbe7561
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/BUILD
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "migrate",
+    srcs = [
+        "migrate.go",
+        "schema.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/migrate",
+    importpath = "github.com/dexidp/dex/storage/ent/db/migrate",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/entgo.io/ent/dialect",
+        "//vendor/entgo.io/ent/dialect/sql/schema",
+        "//vendor/entgo.io/ent/schema/field",
+    ],
+)
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/migrate/migrate.go b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/migrate.go
new file mode 100644
index 00000000..1956a6bf
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/migrate.go
@@ -0,0 +1,64 @@
+// Code generated by ent, DO NOT EDIT.
+
+package migrate
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql/schema"
+)
+
+var (
+	// WithGlobalUniqueID sets the universal ids option to the migration.
+	// If this option is enabled, ent migration will allocate a 1<<32 range
+	// for the ids of each entity (table).
+	// Note that this option cannot be applied to tables that already exist.
+	WithGlobalUniqueID = schema.WithGlobalUniqueID
+	// WithDropColumn sets the drop column option to the migration.
+	// If this option is enabled, ent migration will drop old columns
+	// that were used for both fields and edges. This defaults to false.
+	WithDropColumn = schema.WithDropColumn
+	// WithDropIndex sets the drop index option to the migration.
+	// If this option is enabled, ent migration will drop old indexes
+	// that were defined in the schema. This defaults to false.
+	// Note that unique constraints are defined using `UNIQUE INDEX`,
+	// and therefore, it's recommended to enable this option to get more
+	// flexibility in the schema changes.
+	WithDropIndex = schema.WithDropIndex
+	// WithForeignKeys enables creating foreign keys in the schema DDL. This defaults to true.
+	WithForeignKeys = schema.WithForeignKeys
+)
+
+// Schema is the API for creating, migrating and dropping a schema.
+type Schema struct {
+	drv dialect.Driver
+}
+
+// NewSchema creates a new schema client.
+func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }
+
+// Create creates all schema resources.
+func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
+	return Create(ctx, s, Tables, opts...)
+}
+
+// Create creates all table resources using the given schema driver.
+func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
+	migrate, err := schema.NewMigrate(s.drv, opts...)
+	if err != nil {
+		return fmt.Errorf("ent/migrate: %w", err)
+	}
+	return migrate.Create(ctx, tables...)
+}
+
+// WriteTo writes the schema changes to w instead of running them against the database.
+//
+//	if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
+//		log.Fatal(err)
+//	}
+func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
+	return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/migrate/schema.go b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/schema.go
new file mode 100644
index 00000000..d3295a0c
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/migrate/schema.go
@@ -0,0 +1,213 @@
+// Code generated by ent, DO NOT EDIT.
+
+package migrate
+
+import (
+	"entgo.io/ent/dialect/sql/schema"
+	"entgo.io/ent/schema/field"
+)
+
+var (
+	// AuthCodesColumns holds the columns for the "auth_codes" table.
+ AuthCodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "scopes", Type: field.TypeJSON, Nullable: true}, + {Name: "nonce", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "redirect_uri", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_username", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email_verified", Type: field.TypeBool}, + {Name: "claims_groups", Type: field.TypeJSON, Nullable: true}, + {Name: "claims_preferred_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_data", Type: field.TypeBytes, Nullable: true}, + {Name: "expiry", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "code_challenge", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "code_challenge_method", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + } + // AuthCodesTable holds the schema information for the "auth_codes" table. + AuthCodesTable = &schema.Table{ + Name: "auth_codes", + Columns: AuthCodesColumns, + PrimaryKey: []*schema.Column{AuthCodesColumns[0]}, + } + // AuthRequestsColumns holds the columns for the "auth_requests" table. 
+ AuthRequestsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "scopes", Type: field.TypeJSON, Nullable: true}, + {Name: "response_types", Type: field.TypeJSON, Nullable: true}, + {Name: "redirect_uri", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "nonce", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "state", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "force_approval_prompt", Type: field.TypeBool}, + {Name: "logged_in", Type: field.TypeBool}, + {Name: "claims_user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_username", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email_verified", Type: field.TypeBool}, + {Name: "claims_groups", Type: field.TypeJSON, Nullable: true}, + {Name: "claims_preferred_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_data", Type: field.TypeBytes, Nullable: true}, + {Name: "expiry", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "code_challenge", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "code_challenge_method", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "hmac_key", Type: field.TypeBytes}, + } + // AuthRequestsTable holds the schema information for the "auth_requests" table. + AuthRequestsTable = &schema.Table{ + Name: "auth_requests", + Columns: AuthRequestsColumns, + PrimaryKey: []*schema.Column{AuthRequestsColumns[0]}, + } + // ConnectorsColumns holds the columns for the "connectors" table. 
+ ConnectorsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 100, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "type", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "resource_version", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "config", Type: field.TypeBytes}, + } + // ConnectorsTable holds the schema information for the "connectors" table. + ConnectorsTable = &schema.Table{ + Name: "connectors", + Columns: ConnectorsColumns, + PrimaryKey: []*schema.Column{ConnectorsColumns[0]}, + } + // DeviceRequestsColumns holds the columns for the "device_requests" table. + DeviceRequestsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "user_code", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "device_code", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_secret", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "scopes", Type: field.TypeJSON, Nullable: true}, + {Name: "expiry", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + } + // DeviceRequestsTable holds the schema information for the "device_requests" table. + DeviceRequestsTable = &schema.Table{ + Name: "device_requests", + Columns: DeviceRequestsColumns, + PrimaryKey: []*schema.Column{DeviceRequestsColumns[0]}, + } + // DeviceTokensColumns holds the columns for the "device_tokens" table. 
+ DeviceTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "device_code", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "status", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "token", Type: field.TypeBytes, Nullable: true}, + {Name: "expiry", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "last_request", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "poll_interval", Type: field.TypeInt}, + {Name: "code_challenge", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "code_challenge_method", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + } + // DeviceTokensTable holds the schema information for the "device_tokens" table. + DeviceTokensTable = &schema.Table{ + Name: "device_tokens", + Columns: DeviceTokensColumns, + PrimaryKey: []*schema.Column{DeviceTokensColumns[0]}, + } + // KeysColumns holds the columns for the "keys" table. + KeysColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "verification_keys", Type: field.TypeJSON}, + {Name: "signing_key", Type: field.TypeJSON}, + {Name: "signing_key_pub", Type: field.TypeJSON}, + {Name: "next_rotation", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + } + // KeysTable holds the schema information for the "keys" table. + KeysTable = &schema.Table{ + Name: "keys", + Columns: KeysColumns, + PrimaryKey: []*schema.Column{KeysColumns[0]}, + } + // Oauth2clientsColumns holds the columns for the "oauth2clients" table. + Oauth2clientsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 100, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "secret", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "redirect_uris", Type: field.TypeJSON, Nullable: true}, + {Name: "trusted_peers", Type: field.TypeJSON, Nullable: true}, + {Name: "public", Type: field.TypeBool}, + {Name: "name", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "logo_url", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + } + // Oauth2clientsTable holds the schema information for the "oauth2clients" table. + Oauth2clientsTable = &schema.Table{ + Name: "oauth2clients", + Columns: Oauth2clientsColumns, + PrimaryKey: []*schema.Column{Oauth2clientsColumns[0]}, + } + // OfflineSessionsColumns holds the columns for the "offline_sessions" table. 
+ OfflineSessionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "conn_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "refresh", Type: field.TypeBytes}, + {Name: "connector_data", Type: field.TypeBytes, Nullable: true}, + } + // OfflineSessionsTable holds the schema information for the "offline_sessions" table. + OfflineSessionsTable = &schema.Table{ + Name: "offline_sessions", + Columns: OfflineSessionsColumns, + PrimaryKey: []*schema.Column{OfflineSessionsColumns[0]}, + } + // PasswordsColumns holds the columns for the "passwords" table. + PasswordsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "email", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "hash", Type: field.TypeBytes}, + {Name: "username", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + } + // PasswordsTable holds the schema information for the "passwords" table. + PasswordsTable = &schema.Table{ + Name: "passwords", + Columns: PasswordsColumns, + PrimaryKey: []*schema.Column{PasswordsColumns[0]}, + } + // RefreshTokensColumns holds the columns for the "refresh_tokens" table. 
+ RefreshTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "client_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "scopes", Type: field.TypeJSON, Nullable: true}, + {Name: "nonce", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_user_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_username", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "claims_email_verified", Type: field.TypeBool}, + {Name: "claims_groups", Type: field.TypeJSON, Nullable: true}, + {Name: "claims_preferred_username", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "connector_data", Type: field.TypeBytes, Nullable: true}, + {Name: "token", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "obsolete_token", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"mysql": "varchar(384)", "postgres": "text", "sqlite3": "text"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + {Name: "last_used", Type: field.TypeTime, SchemaType: map[string]string{"mysql": "datetime(3)", "postgres": "timestamptz", "sqlite3": "timestamp"}}, + } + // RefreshTokensTable holds the schema information for the "refresh_tokens" table. + RefreshTokensTable = &schema.Table{ + Name: "refresh_tokens", + Columns: RefreshTokensColumns, + PrimaryKey: []*schema.Column{RefreshTokensColumns[0]}, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + AuthCodesTable, + AuthRequestsTable, + ConnectorsTable, + DeviceRequestsTable, + DeviceTokensTable, + KeysTable, + Oauth2clientsTable, + OfflineSessionsTable, + PasswordsTable, + RefreshTokensTable, + } +) + +func init() { +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/mutation.go b/vendor/github.com/dexidp/dex/storage/ent/db/mutation.go new file mode 100644 index 00000000..aec11425 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/mutation.go @@ -0,0 +1,7982 @@ +// Code generated by ent, DO NOT EDIT. 
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/ent/db/authcode"
+	"github.com/dexidp/dex/storage/ent/db/authrequest"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/devicerequest"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/offlinesession"
+	"github.com/dexidp/dex/storage/ent/db/password"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
+	jose "gopkg.in/square/go-jose.v2"
+)
+
+const (
+	// Operation types.
+	OpCreate    = ent.OpCreate
+	OpDelete    = ent.OpDelete
+	OpDeleteOne = ent.OpDeleteOne
+	OpUpdate    = ent.OpUpdate
+	OpUpdateOne = ent.OpUpdateOne
+
+	// Node types.
+	TypeAuthCode       = "AuthCode"
+	TypeAuthRequest    = "AuthRequest"
+	TypeConnector      = "Connector"
+	TypeDeviceRequest  = "DeviceRequest"
+	TypeDeviceToken    = "DeviceToken"
+	TypeKeys           = "Keys"
+	TypeOAuth2Client   = "OAuth2Client"
+	TypeOfflineSession = "OfflineSession"
+	TypePassword       = "Password"
+	TypeRefreshToken   = "RefreshToken"
+)
+
+// AuthCodeMutation represents an operation that mutates the AuthCode nodes in the graph.
+type AuthCodeMutation struct {
+	config
+	op                        Op
+	typ                       string
+	id                        *string
+	client_id                 *string
+	scopes                    *[]string
+	appendscopes              []string
+	nonce                     *string
+	redirect_uri              *string
+	claims_user_id            *string
+	claims_username           *string
+	claims_email              *string
+	claims_email_verified     *bool
+	claims_groups             *[]string
+	appendclaims_groups       []string
+	claims_preferred_username *string
+	connector_id              *string
+	connector_data            *[]byte
+	expiry                    *time.Time
+	code_challenge            *string
+	code_challenge_method     *string
+	clearedFields             map[string]struct{}
+	done                      bool
+	oldValue                  func(context.Context) (*AuthCode, error)
+	predicates                []predicate.AuthCode
+}
+
+var _ ent.Mutation = (*AuthCodeMutation)(nil)
+
+// authcodeOption allows management of the mutation configuration using functional options.
+type authcodeOption func(*AuthCodeMutation)
+
+// newAuthCodeMutation creates a new mutation for the AuthCode entity.
+func newAuthCodeMutation(c config, op Op, opts ...authcodeOption) *AuthCodeMutation {
+	m := &AuthCodeMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeAuthCode,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withAuthCodeID sets the ID field of the mutation.
+func withAuthCodeID(id string) authcodeOption {
+	return func(m *AuthCodeMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *AuthCode
+		)
+		m.oldValue = func(ctx context.Context) (*AuthCode, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().AuthCode.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withAuthCode sets the old AuthCode of the mutation.
+func withAuthCode(node *AuthCode) authcodeOption {
+	return func(m *AuthCodeMutation) {
+		m.oldValue = func(context.Context) (*AuthCode, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
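+// Hooks commonly use this client to run additional queries within the same
+// transaction.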
+func (m AuthCodeMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m AuthCodeMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of AuthCode entities.
+func (m *AuthCodeMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *AuthCodeMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *AuthCodeMutation) IDs(ctx context.Context) ([]string, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []string{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().AuthCode.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetClientID sets the "client_id" field.
+func (m *AuthCodeMutation) SetClientID(s string) {
+	m.client_id = &s
+}
+
+// ClientID returns the value of the "client_id" field in the mutation.
+func (m *AuthCodeMutation) ClientID() (r string, exists bool) {
+	v := m.client_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldClientID returns the old "client_id" field's value of the AuthCode entity.
+// If the AuthCode object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthCodeMutation) OldClientID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldClientID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldClientID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldClientID: %w", err)
+	}
+	return oldValue.ClientID, nil
+}
+
+// ResetClientID resets all changes to the "client_id" field.
+func (m *AuthCodeMutation) ResetClientID() {
+	m.client_id = nil
+}
+
+// SetScopes sets the "scopes" field.
+func (m *AuthCodeMutation) SetScopes(s []string) {
+	m.scopes = &s
+	m.appendscopes = nil
+}
+
+// Scopes returns the value of the "scopes" field in the mutation.
+func (m *AuthCodeMutation) Scopes() (r []string, exists bool) {
+	v := m.scopes
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldScopes returns the old "scopes" field's value of the AuthCode entity.
+// If the AuthCode object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthCodeMutation) OldScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScopes: %w", err) + } + return oldValue.Scopes, nil +} + +// AppendScopes adds s to the "scopes" field. +func (m *AuthCodeMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) +} + +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. +func (m *AuthCodeMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false + } + return m.appendscopes, true +} + +// ClearScopes clears the value of the "scopes" field. +func (m *AuthCodeMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[authcode.FieldScopes] = struct{}{} +} + +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *AuthCodeMutation) ScopesCleared() bool { + _, ok := m.clearedFields[authcode.FieldScopes] + return ok +} + +// ResetScopes resets all changes to the "scopes" field. +func (m *AuthCodeMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, authcode.FieldScopes) +} + +// SetNonce sets the "nonce" field. +func (m *AuthCodeMutation) SetNonce(s string) { + m.nonce = &s +} + +// Nonce returns the value of the "nonce" field in the mutation. +func (m *AuthCodeMutation) Nonce() (r string, exists bool) { + v := m.nonce + if v == nil { + return + } + return *v, true +} + +// OldNonce returns the old "nonce" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldNonce(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNonce is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNonce requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNonce: %w", err) + } + return oldValue.Nonce, nil +} + +// ResetNonce resets all changes to the "nonce" field. +func (m *AuthCodeMutation) ResetNonce() { + m.nonce = nil +} + +// SetRedirectURI sets the "redirect_uri" field. +func (m *AuthCodeMutation) SetRedirectURI(s string) { + m.redirect_uri = &s +} + +// RedirectURI returns the value of the "redirect_uri" field in the mutation. +func (m *AuthCodeMutation) RedirectURI() (r string, exists bool) { + v := m.redirect_uri + if v == nil { + return + } + return *v, true +} + +// OldRedirectURI returns the old "redirect_uri" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthCodeMutation) OldRedirectURI(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRedirectURI is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRedirectURI requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRedirectURI: %w", err) + } + return oldValue.RedirectURI, nil +} + +// ResetRedirectURI resets all changes to the "redirect_uri" field. +func (m *AuthCodeMutation) ResetRedirectURI() { + m.redirect_uri = nil +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (m *AuthCodeMutation) SetClaimsUserID(s string) { + m.claims_user_id = &s +} + +// ClaimsUserID returns the value of the "claims_user_id" field in the mutation. +func (m *AuthCodeMutation) ClaimsUserID() (r string, exists bool) { + v := m.claims_user_id + if v == nil { + return + } + return *v, true +} + +// OldClaimsUserID returns the old "claims_user_id" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUserID: %w", err) + } + return oldValue.ClaimsUserID, nil +} + +// ResetClaimsUserID resets all changes to the "claims_user_id" field. +func (m *AuthCodeMutation) ResetClaimsUserID() { + m.claims_user_id = nil +} + +// SetClaimsUsername sets the "claims_username" field. +func (m *AuthCodeMutation) SetClaimsUsername(s string) { + m.claims_username = &s +} + +// ClaimsUsername returns the value of the "claims_username" field in the mutation. +func (m *AuthCodeMutation) ClaimsUsername() (r string, exists bool) { + v := m.claims_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsUsername returns the old "claims_username" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUsername: %w", err) + } + return oldValue.ClaimsUsername, nil +} + +// ResetClaimsUsername resets all changes to the "claims_username" field. +func (m *AuthCodeMutation) ResetClaimsUsername() { + m.claims_username = nil +} + +// SetClaimsEmail sets the "claims_email" field. +func (m *AuthCodeMutation) SetClaimsEmail(s string) { + m.claims_email = &s +} + +// ClaimsEmail returns the value of the "claims_email" field in the mutation. 
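+//
+// Editor's note, a minimal sketch: as with every generated getter on a
+// mutation, the boolean reports whether the field was set in this mutation,
+// not whether the stored row has a value:
+//
+//	if email, ok := m.ClaimsEmail(); ok {
+//		fmt.Println("claims_email changed to", email)
+//	}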
+func (m *AuthCodeMutation) ClaimsEmail() (r string, exists bool) { + v := m.claims_email + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmail returns the old "claims_email" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmail: %w", err) + } + return oldValue.ClaimsEmail, nil +} + +// ResetClaimsEmail resets all changes to the "claims_email" field. +func (m *AuthCodeMutation) ResetClaimsEmail() { + m.claims_email = nil +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (m *AuthCodeMutation) SetClaimsEmailVerified(b bool) { + m.claims_email_verified = &b +} + +// ClaimsEmailVerified returns the value of the "claims_email_verified" field in the mutation. +func (m *AuthCodeMutation) ClaimsEmailVerified() (r bool, exists bool) { + v := m.claims_email_verified + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmailVerified is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmailVerified requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmailVerified: %w", err) + } + return oldValue.ClaimsEmailVerified, nil +} + +// ResetClaimsEmailVerified resets all changes to the "claims_email_verified" field. +func (m *AuthCodeMutation) ResetClaimsEmailVerified() { + m.claims_email_verified = nil +} + +// SetClaimsGroups sets the "claims_groups" field. +func (m *AuthCodeMutation) SetClaimsGroups(s []string) { + m.claims_groups = &s + m.appendclaims_groups = nil +} + +// ClaimsGroups returns the value of the "claims_groups" field in the mutation. +func (m *AuthCodeMutation) ClaimsGroups() (r []string, exists bool) { + v := m.claims_groups + if v == nil { + return + } + return *v, true +} + +// OldClaimsGroups returns the old "claims_groups" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
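+//
+// Editor's note (illustrative): for JSON list fields such as "claims_groups",
+// SetClaimsGroups replaces the value recorded in the mutation, while
+// AppendClaimsGroups (below) records an incremental append that is applied on
+// top of the stored value when the mutation is saved:
+//
+//	m.AppendClaimsGroups([]string{"devs"})
+//	added, _ := m.AppendedClaimsGroups() // ["devs"]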
+func (m *AuthCodeMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsGroups is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsGroups requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsGroups: %w", err) + } + return oldValue.ClaimsGroups, nil +} + +// AppendClaimsGroups adds s to the "claims_groups" field. +func (m *AuthCodeMutation) AppendClaimsGroups(s []string) { + m.appendclaims_groups = append(m.appendclaims_groups, s...) +} + +// AppendedClaimsGroups returns the list of values that were appended to the "claims_groups" field in this mutation. +func (m *AuthCodeMutation) AppendedClaimsGroups() ([]string, bool) { + if len(m.appendclaims_groups) == 0 { + return nil, false + } + return m.appendclaims_groups, true +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (m *AuthCodeMutation) ClearClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + m.clearedFields[authcode.FieldClaimsGroups] = struct{}{} +} + +// ClaimsGroupsCleared returns if the "claims_groups" field was cleared in this mutation. +func (m *AuthCodeMutation) ClaimsGroupsCleared() bool { + _, ok := m.clearedFields[authcode.FieldClaimsGroups] + return ok +} + +// ResetClaimsGroups resets all changes to the "claims_groups" field. +func (m *AuthCodeMutation) ResetClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + delete(m.clearedFields, authcode.FieldClaimsGroups) +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (m *AuthCodeMutation) SetClaimsPreferredUsername(s string) { + m.claims_preferred_username = &s +} + +// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. +func (m *AuthCodeMutation) ClaimsPreferredUsername() (r string, exists bool) { + v := m.claims_preferred_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) + } + return oldValue.ClaimsPreferredUsername, nil +} + +// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. +func (m *AuthCodeMutation) ResetClaimsPreferredUsername() { + m.claims_preferred_username = nil +} + +// SetConnectorID sets the "connector_id" field. +func (m *AuthCodeMutation) SetConnectorID(s string) { + m.connector_id = &s +} + +// ConnectorID returns the value of the "connector_id" field in the mutation. 
+func (m *AuthCodeMutation) ConnectorID() (r string, exists bool) { + v := m.connector_id + if v == nil { + return + } + return *v, true +} + +// OldConnectorID returns the old "connector_id" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldConnectorID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) + } + return oldValue.ConnectorID, nil +} + +// ResetConnectorID resets all changes to the "connector_id" field. +func (m *AuthCodeMutation) ResetConnectorID() { + m.connector_id = nil +} + +// SetConnectorData sets the "connector_data" field. +func (m *AuthCodeMutation) SetConnectorData(b []byte) { + m.connector_data = &b +} + +// ConnectorData returns the value of the "connector_data" field in the mutation. +func (m *AuthCodeMutation) ConnectorData() (r []byte, exists bool) { + v := m.connector_data + if v == nil { + return + } + return *v, true +} + +// OldConnectorData returns the old "connector_data" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthCodeMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorData requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) + } + return oldValue.ConnectorData, nil +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (m *AuthCodeMutation) ClearConnectorData() { + m.connector_data = nil + m.clearedFields[authcode.FieldConnectorData] = struct{}{} +} + +// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. +func (m *AuthCodeMutation) ConnectorDataCleared() bool { + _, ok := m.clearedFields[authcode.FieldConnectorData] + return ok +} + +// ResetConnectorData resets all changes to the "connector_data" field. +func (m *AuthCodeMutation) ResetConnectorData() { + m.connector_data = nil + delete(m.clearedFields, authcode.FieldConnectorData) +} + +// SetExpiry sets the "expiry" field. +func (m *AuthCodeMutation) SetExpiry(t time.Time) { + m.expiry = &t +} + +// Expiry returns the value of the "expiry" field in the mutation. +func (m *AuthCodeMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry + if v == nil { + return + } + return *v, true +} + +// OldExpiry returns the old "expiry" field's value of the AuthCode entity. +// If the AuthCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthCodeMutation) OldExpiry(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldExpiry is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldExpiry requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldExpiry: %w", err)
+	}
+	return oldValue.Expiry, nil
+}
+
+// ResetExpiry resets all changes to the "expiry" field.
+func (m *AuthCodeMutation) ResetExpiry() {
+	m.expiry = nil
+}
+
+// SetCodeChallenge sets the "code_challenge" field.
+func (m *AuthCodeMutation) SetCodeChallenge(s string) {
+	m.code_challenge = &s
+}
+
+// CodeChallenge returns the value of the "code_challenge" field in the mutation.
+func (m *AuthCodeMutation) CodeChallenge() (r string, exists bool) {
+	v := m.code_challenge
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCodeChallenge returns the old "code_challenge" field's value of the AuthCode entity.
+// If the AuthCode object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthCodeMutation) OldCodeChallenge(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCodeChallenge is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCodeChallenge requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCodeChallenge: %w", err)
+	}
+	return oldValue.CodeChallenge, nil
+}
+
+// ResetCodeChallenge resets all changes to the "code_challenge" field.
+func (m *AuthCodeMutation) ResetCodeChallenge() {
+	m.code_challenge = nil
+}
+
+// SetCodeChallengeMethod sets the "code_challenge_method" field.
+func (m *AuthCodeMutation) SetCodeChallengeMethod(s string) {
+	m.code_challenge_method = &s
+}
+
+// CodeChallengeMethod returns the value of the "code_challenge_method" field in the mutation.
+func (m *AuthCodeMutation) CodeChallengeMethod() (r string, exists bool) {
+	v := m.code_challenge_method
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCodeChallengeMethod returns the old "code_challenge_method" field's value of the AuthCode entity.
+// If the AuthCode object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthCodeMutation) OldCodeChallengeMethod(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCodeChallengeMethod is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCodeChallengeMethod requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCodeChallengeMethod: %w", err)
+	}
+	return oldValue.CodeChallengeMethod, nil
+}
+
+// ResetCodeChallengeMethod resets all changes to the "code_challenge_method" field.
+func (m *AuthCodeMutation) ResetCodeChallengeMethod() {
+	m.code_challenge_method = nil
+}
+
+// Where appends a list of predicates to the AuthCodeMutation builder.
+func (m *AuthCodeMutation) Where(ps ...predicate.AuthCode) {
+	m.predicates = append(m.predicates, ps...)
+} + +// WhereP appends storage-level predicates to the AuthCodeMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AuthCodeMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AuthCode, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AuthCodeMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AuthCodeMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AuthCode). +func (m *AuthCodeMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AuthCodeMutation) Fields() []string { + fields := make([]string, 0, 15) + if m.client_id != nil { + fields = append(fields, authcode.FieldClientID) + } + if m.scopes != nil { + fields = append(fields, authcode.FieldScopes) + } + if m.nonce != nil { + fields = append(fields, authcode.FieldNonce) + } + if m.redirect_uri != nil { + fields = append(fields, authcode.FieldRedirectURI) + } + if m.claims_user_id != nil { + fields = append(fields, authcode.FieldClaimsUserID) + } + if m.claims_username != nil { + fields = append(fields, authcode.FieldClaimsUsername) + } + if m.claims_email != nil { + fields = append(fields, authcode.FieldClaimsEmail) + } + if m.claims_email_verified != nil { + fields = append(fields, authcode.FieldClaimsEmailVerified) + } + if m.claims_groups != nil { + fields = append(fields, authcode.FieldClaimsGroups) + } + if m.claims_preferred_username != nil { + fields = append(fields, authcode.FieldClaimsPreferredUsername) + } + if m.connector_id != nil { + fields = append(fields, authcode.FieldConnectorID) + } + if m.connector_data != nil { + fields = append(fields, authcode.FieldConnectorData) + } + if m.expiry != nil { + fields = append(fields, authcode.FieldExpiry) + } + if m.code_challenge != nil { + fields = append(fields, authcode.FieldCodeChallenge) + } + if m.code_challenge_method != nil { + fields = append(fields, authcode.FieldCodeChallengeMethod) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
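+//
+// Editor's note, a short sketch: Field, together with Fields above and
+// OldField below, forms the dynamic accessor surface of the ent.Mutation
+// interface, so generic hooks can inspect a mutation without type-asserting
+// to *AuthCodeMutation:
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("%s = %v", name, v)
+//		}
+//	}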
+func (m *AuthCodeMutation) Field(name string) (ent.Value, bool) { + switch name { + case authcode.FieldClientID: + return m.ClientID() + case authcode.FieldScopes: + return m.Scopes() + case authcode.FieldNonce: + return m.Nonce() + case authcode.FieldRedirectURI: + return m.RedirectURI() + case authcode.FieldClaimsUserID: + return m.ClaimsUserID() + case authcode.FieldClaimsUsername: + return m.ClaimsUsername() + case authcode.FieldClaimsEmail: + return m.ClaimsEmail() + case authcode.FieldClaimsEmailVerified: + return m.ClaimsEmailVerified() + case authcode.FieldClaimsGroups: + return m.ClaimsGroups() + case authcode.FieldClaimsPreferredUsername: + return m.ClaimsPreferredUsername() + case authcode.FieldConnectorID: + return m.ConnectorID() + case authcode.FieldConnectorData: + return m.ConnectorData() + case authcode.FieldExpiry: + return m.Expiry() + case authcode.FieldCodeChallenge: + return m.CodeChallenge() + case authcode.FieldCodeChallengeMethod: + return m.CodeChallengeMethod() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AuthCodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case authcode.FieldClientID: + return m.OldClientID(ctx) + case authcode.FieldScopes: + return m.OldScopes(ctx) + case authcode.FieldNonce: + return m.OldNonce(ctx) + case authcode.FieldRedirectURI: + return m.OldRedirectURI(ctx) + case authcode.FieldClaimsUserID: + return m.OldClaimsUserID(ctx) + case authcode.FieldClaimsUsername: + return m.OldClaimsUsername(ctx) + case authcode.FieldClaimsEmail: + return m.OldClaimsEmail(ctx) + case authcode.FieldClaimsEmailVerified: + return m.OldClaimsEmailVerified(ctx) + case authcode.FieldClaimsGroups: + return m.OldClaimsGroups(ctx) + case authcode.FieldClaimsPreferredUsername: + return m.OldClaimsPreferredUsername(ctx) + case authcode.FieldConnectorID: + return m.OldConnectorID(ctx) + case authcode.FieldConnectorData: + return m.OldConnectorData(ctx) + case authcode.FieldExpiry: + return m.OldExpiry(ctx) + case authcode.FieldCodeChallenge: + return m.OldCodeChallenge(ctx) + case authcode.FieldCodeChallengeMethod: + return m.OldCodeChallengeMethod(ctx) + } + return nil, fmt.Errorf("unknown AuthCode field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
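+//
+// Editor's note, a minimal sketch: the value must have exactly the Go type of
+// the target field, otherwise SetField returns an "unexpected type" error:
+//
+//	// Succeeds: client_id is a string field.
+//	_ = m.SetField(authcode.FieldClientID, "example-app")
+//	// Fails with "unexpected type int for field client_id":
+//	err := m.SetField(authcode.FieldClientID, 42)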
+func (m *AuthCodeMutation) SetField(name string, value ent.Value) error { + switch name { + case authcode.FieldClientID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClientID(v) + return nil + case authcode.FieldScopes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScopes(v) + return nil + case authcode.FieldNonce: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNonce(v) + return nil + case authcode.FieldRedirectURI: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedirectURI(v) + return nil + case authcode.FieldClaimsUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUserID(v) + return nil + case authcode.FieldClaimsUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUsername(v) + return nil + case authcode.FieldClaimsEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmail(v) + return nil + case authcode.FieldClaimsEmailVerified: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmailVerified(v) + return nil + case authcode.FieldClaimsGroups: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsGroups(v) + return nil + case authcode.FieldClaimsPreferredUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsPreferredUsername(v) + return nil + case authcode.FieldConnectorID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorID(v) + return nil + case authcode.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + case authcode.FieldExpiry: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiry(v) + return nil + case authcode.FieldCodeChallenge: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallenge(v) + return nil + case authcode.FieldCodeChallengeMethod: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallengeMethod(v) + return nil + } + return fmt.Errorf("unknown AuthCode field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AuthCodeMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AuthCodeMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
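+//
+// Editor's note: AuthCode declares no numeric fields, which is why
+// AddedFields returns nil, AddedField never reports a value, and AddField
+// below always returns an "unknown AuthCode numeric field" error.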
+func (m *AuthCodeMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown AuthCode numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AuthCodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(authcode.FieldScopes) { + fields = append(fields, authcode.FieldScopes) + } + if m.FieldCleared(authcode.FieldClaimsGroups) { + fields = append(fields, authcode.FieldClaimsGroups) + } + if m.FieldCleared(authcode.FieldConnectorData) { + fields = append(fields, authcode.FieldConnectorData) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AuthCodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AuthCodeMutation) ClearField(name string) error { + switch name { + case authcode.FieldScopes: + m.ClearScopes() + return nil + case authcode.FieldClaimsGroups: + m.ClearClaimsGroups() + return nil + case authcode.FieldConnectorData: + m.ClearConnectorData() + return nil + } + return fmt.Errorf("unknown AuthCode nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AuthCodeMutation) ResetField(name string) error { + switch name { + case authcode.FieldClientID: + m.ResetClientID() + return nil + case authcode.FieldScopes: + m.ResetScopes() + return nil + case authcode.FieldNonce: + m.ResetNonce() + return nil + case authcode.FieldRedirectURI: + m.ResetRedirectURI() + return nil + case authcode.FieldClaimsUserID: + m.ResetClaimsUserID() + return nil + case authcode.FieldClaimsUsername: + m.ResetClaimsUsername() + return nil + case authcode.FieldClaimsEmail: + m.ResetClaimsEmail() + return nil + case authcode.FieldClaimsEmailVerified: + m.ResetClaimsEmailVerified() + return nil + case authcode.FieldClaimsGroups: + m.ResetClaimsGroups() + return nil + case authcode.FieldClaimsPreferredUsername: + m.ResetClaimsPreferredUsername() + return nil + case authcode.FieldConnectorID: + m.ResetConnectorID() + return nil + case authcode.FieldConnectorData: + m.ResetConnectorData() + return nil + case authcode.FieldExpiry: + m.ResetExpiry() + return nil + case authcode.FieldCodeChallenge: + m.ResetCodeChallenge() + return nil + case authcode.FieldCodeChallengeMethod: + m.ResetCodeChallengeMethod() + return nil + } + return fmt.Errorf("unknown AuthCode field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AuthCodeMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AuthCodeMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AuthCodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *AuthCodeMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AuthCodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AuthCodeMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AuthCodeMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown AuthCode unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AuthCodeMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown AuthCode edge %s", name) +} + +// AuthRequestMutation represents an operation that mutates the AuthRequest nodes in the graph. +type AuthRequestMutation struct { + config + op Op + typ string + id *string + client_id *string + scopes *[]string + appendscopes []string + response_types *[]string + appendresponse_types []string + redirect_uri *string + nonce *string + state *string + force_approval_prompt *bool + logged_in *bool + claims_user_id *string + claims_username *string + claims_email *string + claims_email_verified *bool + claims_groups *[]string + appendclaims_groups []string + claims_preferred_username *string + connector_id *string + connector_data *[]byte + expiry *time.Time + code_challenge *string + code_challenge_method *string + hmac_key *[]byte + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*AuthRequest, error) + predicates []predicate.AuthRequest +} + +var _ ent.Mutation = (*AuthRequestMutation)(nil) + +// authrequestOption allows management of the mutation configuration using functional options. +type authrequestOption func(*AuthRequestMutation) + +// newAuthRequestMutation creates new mutation for the AuthRequest entity. +func newAuthRequestMutation(c config, op Op, opts ...authrequestOption) *AuthRequestMutation { + m := &AuthRequestMutation{ + config: c, + op: op, + typ: TypeAuthRequest, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAuthRequestID sets the ID field of the mutation. +func withAuthRequestID(id string) authrequestOption { + return func(m *AuthRequestMutation) { + var ( + err error + once sync.Once + value *AuthRequest + ) + m.oldValue = func(ctx context.Context) (*AuthRequest, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().AuthRequest.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAuthRequest sets the old AuthRequest of the mutation. +func withAuthRequest(node *AuthRequest) authrequestOption { + return func(m *AuthRequestMutation) { + m.oldValue = func(context.Context) (*AuthRequest, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
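+//
+// Editor's note, an illustrative sketch: inside a hook this gives access to
+// the full generated API, e.g. to run follow-up queries on the same driver
+// (and therefore the same transaction) the mutation runs on:
+//
+//	n, err := m.Client().AuthRequest.Query().Count(ctx)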
+func (m AuthRequestMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m AuthRequestMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of AuthRequest entities.
+func (m *AuthRequestMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *AuthRequestMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *AuthRequestMutation) IDs(ctx context.Context) ([]string, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []string{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().AuthRequest.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetClientID sets the "client_id" field.
+func (m *AuthRequestMutation) SetClientID(s string) {
+	m.client_id = &s
+}
+
+// ClientID returns the value of the "client_id" field in the mutation.
+func (m *AuthRequestMutation) ClientID() (r string, exists bool) {
+	v := m.client_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldClientID returns the old "client_id" field's value of the AuthRequest entity.
+// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthRequestMutation) OldClientID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldClientID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldClientID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldClientID: %w", err)
+	}
+	return oldValue.ClientID, nil
+}
+
+// ResetClientID resets all changes to the "client_id" field.
+func (m *AuthRequestMutation) ResetClientID() {
+	m.client_id = nil
+}
+
+// SetScopes sets the "scopes" field.
+func (m *AuthRequestMutation) SetScopes(s []string) {
+	m.scopes = &s
+	m.appendscopes = nil
+}
+
+// Scopes returns the value of the "scopes" field in the mutation.
+func (m *AuthRequestMutation) Scopes() (r []string, exists bool) {
+	v := m.scopes
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldScopes returns the old "scopes" field's value of the AuthRequest entity.
+// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AuthRequestMutation) OldScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScopes: %w", err) + } + return oldValue.Scopes, nil +} + +// AppendScopes adds s to the "scopes" field. +func (m *AuthRequestMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) +} + +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. +func (m *AuthRequestMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false + } + return m.appendscopes, true +} + +// ClearScopes clears the value of the "scopes" field. +func (m *AuthRequestMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[authrequest.FieldScopes] = struct{}{} +} + +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *AuthRequestMutation) ScopesCleared() bool { + _, ok := m.clearedFields[authrequest.FieldScopes] + return ok +} + +// ResetScopes resets all changes to the "scopes" field. +func (m *AuthRequestMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, authrequest.FieldScopes) +} + +// SetResponseTypes sets the "response_types" field. +func (m *AuthRequestMutation) SetResponseTypes(s []string) { + m.response_types = &s + m.appendresponse_types = nil +} + +// ResponseTypes returns the value of the "response_types" field in the mutation. +func (m *AuthRequestMutation) ResponseTypes() (r []string, exists bool) { + v := m.response_types + if v == nil { + return + } + return *v, true +} + +// OldResponseTypes returns the old "response_types" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldResponseTypes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResponseTypes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResponseTypes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResponseTypes: %w", err) + } + return oldValue.ResponseTypes, nil +} + +// AppendResponseTypes adds s to the "response_types" field. +func (m *AuthRequestMutation) AppendResponseTypes(s []string) { + m.appendresponse_types = append(m.appendresponse_types, s...) +} + +// AppendedResponseTypes returns the list of values that were appended to the "response_types" field in this mutation. +func (m *AuthRequestMutation) AppendedResponseTypes() ([]string, bool) { + if len(m.appendresponse_types) == 0 { + return nil, false + } + return m.appendresponse_types, true +} + +// ClearResponseTypes clears the value of the "response_types" field. +func (m *AuthRequestMutation) ClearResponseTypes() { + m.response_types = nil + m.appendresponse_types = nil + m.clearedFields[authrequest.FieldResponseTypes] = struct{}{} +} + +// ResponseTypesCleared returns if the "response_types" field was cleared in this mutation. 
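+//
+// Editor's note, a small sketch of the clear/reset lifecycle for this
+// optional field:
+//
+//	m.ClearResponseTypes()
+//	m.ResponseTypesCleared() // true: the column is set to NULL on save
+//	m.ResetResponseTypes()
+//	m.ResponseTypesCleared() // false: all recorded changes were discarded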
+func (m *AuthRequestMutation) ResponseTypesCleared() bool { + _, ok := m.clearedFields[authrequest.FieldResponseTypes] + return ok +} + +// ResetResponseTypes resets all changes to the "response_types" field. +func (m *AuthRequestMutation) ResetResponseTypes() { + m.response_types = nil + m.appendresponse_types = nil + delete(m.clearedFields, authrequest.FieldResponseTypes) +} + +// SetRedirectURI sets the "redirect_uri" field. +func (m *AuthRequestMutation) SetRedirectURI(s string) { + m.redirect_uri = &s +} + +// RedirectURI returns the value of the "redirect_uri" field in the mutation. +func (m *AuthRequestMutation) RedirectURI() (r string, exists bool) { + v := m.redirect_uri + if v == nil { + return + } + return *v, true +} + +// OldRedirectURI returns the old "redirect_uri" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldRedirectURI(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRedirectURI is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRedirectURI requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRedirectURI: %w", err) + } + return oldValue.RedirectURI, nil +} + +// ResetRedirectURI resets all changes to the "redirect_uri" field. +func (m *AuthRequestMutation) ResetRedirectURI() { + m.redirect_uri = nil +} + +// SetNonce sets the "nonce" field. +func (m *AuthRequestMutation) SetNonce(s string) { + m.nonce = &s +} + +// Nonce returns the value of the "nonce" field in the mutation. +func (m *AuthRequestMutation) Nonce() (r string, exists bool) { + v := m.nonce + if v == nil { + return + } + return *v, true +} + +// OldNonce returns the old "nonce" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldNonce(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNonce is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNonce requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNonce: %w", err) + } + return oldValue.Nonce, nil +} + +// ResetNonce resets all changes to the "nonce" field. +func (m *AuthRequestMutation) ResetNonce() { + m.nonce = nil +} + +// SetState sets the "state" field. +func (m *AuthRequestMutation) SetState(s string) { + m.state = &s +} + +// State returns the value of the "state" field in the mutation. +func (m *AuthRequestMutation) State() (r string, exists bool) { + v := m.state + if v == nil { + return + } + return *v, true +} + +// OldState returns the old "state" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthRequestMutation) OldState(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldState is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldState requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldState: %w", err) + } + return oldValue.State, nil +} + +// ResetState resets all changes to the "state" field. +func (m *AuthRequestMutation) ResetState() { + m.state = nil +} + +// SetForceApprovalPrompt sets the "force_approval_prompt" field. +func (m *AuthRequestMutation) SetForceApprovalPrompt(b bool) { + m.force_approval_prompt = &b +} + +// ForceApprovalPrompt returns the value of the "force_approval_prompt" field in the mutation. +func (m *AuthRequestMutation) ForceApprovalPrompt() (r bool, exists bool) { + v := m.force_approval_prompt + if v == nil { + return + } + return *v, true +} + +// OldForceApprovalPrompt returns the old "force_approval_prompt" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldForceApprovalPrompt(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldForceApprovalPrompt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldForceApprovalPrompt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldForceApprovalPrompt: %w", err) + } + return oldValue.ForceApprovalPrompt, nil +} + +// ResetForceApprovalPrompt resets all changes to the "force_approval_prompt" field. +func (m *AuthRequestMutation) ResetForceApprovalPrompt() { + m.force_approval_prompt = nil +} + +// SetLoggedIn sets the "logged_in" field. +func (m *AuthRequestMutation) SetLoggedIn(b bool) { + m.logged_in = &b +} + +// LoggedIn returns the value of the "logged_in" field in the mutation. +func (m *AuthRequestMutation) LoggedIn() (r bool, exists bool) { + v := m.logged_in + if v == nil { + return + } + return *v, true +} + +// OldLoggedIn returns the old "logged_in" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldLoggedIn(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLoggedIn is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLoggedIn requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLoggedIn: %w", err) + } + return oldValue.LoggedIn, nil +} + +// ResetLoggedIn resets all changes to the "logged_in" field. +func (m *AuthRequestMutation) ResetLoggedIn() { + m.logged_in = nil +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (m *AuthRequestMutation) SetClaimsUserID(s string) { + m.claims_user_id = &s +} + +// ClaimsUserID returns the value of the "claims_user_id" field in the mutation. 
+func (m *AuthRequestMutation) ClaimsUserID() (r string, exists bool) { + v := m.claims_user_id + if v == nil { + return + } + return *v, true +} + +// OldClaimsUserID returns the old "claims_user_id" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUserID: %w", err) + } + return oldValue.ClaimsUserID, nil +} + +// ResetClaimsUserID resets all changes to the "claims_user_id" field. +func (m *AuthRequestMutation) ResetClaimsUserID() { + m.claims_user_id = nil +} + +// SetClaimsUsername sets the "claims_username" field. +func (m *AuthRequestMutation) SetClaimsUsername(s string) { + m.claims_username = &s +} + +// ClaimsUsername returns the value of the "claims_username" field in the mutation. +func (m *AuthRequestMutation) ClaimsUsername() (r string, exists bool) { + v := m.claims_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsUsername returns the old "claims_username" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUsername: %w", err) + } + return oldValue.ClaimsUsername, nil +} + +// ResetClaimsUsername resets all changes to the "claims_username" field. +func (m *AuthRequestMutation) ResetClaimsUsername() { + m.claims_username = nil +} + +// SetClaimsEmail sets the "claims_email" field. +func (m *AuthRequestMutation) SetClaimsEmail(s string) { + m.claims_email = &s +} + +// ClaimsEmail returns the value of the "claims_email" field in the mutation. +func (m *AuthRequestMutation) ClaimsEmail() (r string, exists bool) { + v := m.claims_email + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmail returns the old "claims_email" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthRequestMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmail: %w", err) + } + return oldValue.ClaimsEmail, nil +} + +// ResetClaimsEmail resets all changes to the "claims_email" field. +func (m *AuthRequestMutation) ResetClaimsEmail() { + m.claims_email = nil +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (m *AuthRequestMutation) SetClaimsEmailVerified(b bool) { + m.claims_email_verified = &b +} + +// ClaimsEmailVerified returns the value of the "claims_email_verified" field in the mutation. +func (m *AuthRequestMutation) ClaimsEmailVerified() (r bool, exists bool) { + v := m.claims_email_verified + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmailVerified is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmailVerified requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmailVerified: %w", err) + } + return oldValue.ClaimsEmailVerified, nil +} + +// ResetClaimsEmailVerified resets all changes to the "claims_email_verified" field. +func (m *AuthRequestMutation) ResetClaimsEmailVerified() { + m.claims_email_verified = nil +} + +// SetClaimsGroups sets the "claims_groups" field. +func (m *AuthRequestMutation) SetClaimsGroups(s []string) { + m.claims_groups = &s + m.appendclaims_groups = nil +} + +// ClaimsGroups returns the value of the "claims_groups" field in the mutation. +func (m *AuthRequestMutation) ClaimsGroups() (r []string, exists bool) { + v := m.claims_groups + if v == nil { + return + } + return *v, true +} + +// OldClaimsGroups returns the old "claims_groups" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsGroups is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsGroups requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsGroups: %w", err) + } + return oldValue.ClaimsGroups, nil +} + +// AppendClaimsGroups adds s to the "claims_groups" field. +func (m *AuthRequestMutation) AppendClaimsGroups(s []string) { + m.appendclaims_groups = append(m.appendclaims_groups, s...) 
+} + +// AppendedClaimsGroups returns the list of values that were appended to the "claims_groups" field in this mutation. +func (m *AuthRequestMutation) AppendedClaimsGroups() ([]string, bool) { + if len(m.appendclaims_groups) == 0 { + return nil, false + } + return m.appendclaims_groups, true +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (m *AuthRequestMutation) ClearClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + m.clearedFields[authrequest.FieldClaimsGroups] = struct{}{} +} + +// ClaimsGroupsCleared returns if the "claims_groups" field was cleared in this mutation. +func (m *AuthRequestMutation) ClaimsGroupsCleared() bool { + _, ok := m.clearedFields[authrequest.FieldClaimsGroups] + return ok +} + +// ResetClaimsGroups resets all changes to the "claims_groups" field. +func (m *AuthRequestMutation) ResetClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + delete(m.clearedFields, authrequest.FieldClaimsGroups) +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (m *AuthRequestMutation) SetClaimsPreferredUsername(s string) { + m.claims_preferred_username = &s +} + +// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. +func (m *AuthRequestMutation) ClaimsPreferredUsername() (r string, exists bool) { + v := m.claims_preferred_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) + } + return oldValue.ClaimsPreferredUsername, nil +} + +// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. +func (m *AuthRequestMutation) ResetClaimsPreferredUsername() { + m.claims_preferred_username = nil +} + +// SetConnectorID sets the "connector_id" field. +func (m *AuthRequestMutation) SetConnectorID(s string) { + m.connector_id = &s +} + +// ConnectorID returns the value of the "connector_id" field in the mutation. +func (m *AuthRequestMutation) ConnectorID() (r string, exists bool) { + v := m.connector_id + if v == nil { + return + } + return *v, true +} + +// OldConnectorID returns the old "connector_id" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthRequestMutation) OldConnectorID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) + } + return oldValue.ConnectorID, nil +} + +// ResetConnectorID resets all changes to the "connector_id" field. +func (m *AuthRequestMutation) ResetConnectorID() { + m.connector_id = nil +} + +// SetConnectorData sets the "connector_data" field. +func (m *AuthRequestMutation) SetConnectorData(b []byte) { + m.connector_data = &b +} + +// ConnectorData returns the value of the "connector_data" field in the mutation. +func (m *AuthRequestMutation) ConnectorData() (r []byte, exists bool) { + v := m.connector_data + if v == nil { + return + } + return *v, true +} + +// OldConnectorData returns the old "connector_data" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorData requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) + } + return oldValue.ConnectorData, nil +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (m *AuthRequestMutation) ClearConnectorData() { + m.connector_data = nil + m.clearedFields[authrequest.FieldConnectorData] = struct{}{} +} + +// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. +func (m *AuthRequestMutation) ConnectorDataCleared() bool { + _, ok := m.clearedFields[authrequest.FieldConnectorData] + return ok +} + +// ResetConnectorData resets all changes to the "connector_data" field. +func (m *AuthRequestMutation) ResetConnectorData() { + m.connector_data = nil + delete(m.clearedFields, authrequest.FieldConnectorData) +} + +// SetExpiry sets the "expiry" field. +func (m *AuthRequestMutation) SetExpiry(t time.Time) { + m.expiry = &t +} + +// Expiry returns the value of the "expiry" field in the mutation. +func (m *AuthRequestMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry + if v == nil { + return + } + return *v, true +} + +// OldExpiry returns the old "expiry" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AuthRequestMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiry is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiry requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiry: %w", err) + } + return oldValue.Expiry, nil +} + +// ResetExpiry resets all changes to the "expiry" field. +func (m *AuthRequestMutation) ResetExpiry() { + m.expiry = nil +} + +// SetCodeChallenge sets the "code_challenge" field. +func (m *AuthRequestMutation) SetCodeChallenge(s string) { + m.code_challenge = &s +} + +// CodeChallenge returns the value of the "code_challenge" field in the mutation. +func (m *AuthRequestMutation) CodeChallenge() (r string, exists bool) { + v := m.code_challenge + if v == nil { + return + } + return *v, true +} + +// OldCodeChallenge returns the old "code_challenge" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldCodeChallenge(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCodeChallenge is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCodeChallenge requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCodeChallenge: %w", err) + } + return oldValue.CodeChallenge, nil +} + +// ResetCodeChallenge resets all changes to the "code_challenge" field. +func (m *AuthRequestMutation) ResetCodeChallenge() { + m.code_challenge = nil +} + +// SetCodeChallengeMethod sets the "code_challenge_method" field. +func (m *AuthRequestMutation) SetCodeChallengeMethod(s string) { + m.code_challenge_method = &s +} + +// CodeChallengeMethod returns the value of the "code_challenge_method" field in the mutation. +func (m *AuthRequestMutation) CodeChallengeMethod() (r string, exists bool) { + v := m.code_challenge_method + if v == nil { + return + } + return *v, true +} + +// OldCodeChallengeMethod returns the old "code_challenge_method" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldCodeChallengeMethod(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCodeChallengeMethod is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCodeChallengeMethod requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCodeChallengeMethod: %w", err) + } + return oldValue.CodeChallengeMethod, nil +} + +// ResetCodeChallengeMethod resets all changes to the "code_challenge_method" field. +func (m *AuthRequestMutation) ResetCodeChallengeMethod() { + m.code_challenge_method = nil +} + +// SetHmacKey sets the "hmac_key" field. 
+func (m *AuthRequestMutation) SetHmacKey(b []byte) { + m.hmac_key = &b +} + +// HmacKey returns the value of the "hmac_key" field in the mutation. +func (m *AuthRequestMutation) HmacKey() (r []byte, exists bool) { + v := m.hmac_key + if v == nil { + return + } + return *v, true +} + +// OldHmacKey returns the old "hmac_key" field's value of the AuthRequest entity. +// If the AuthRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AuthRequestMutation) OldHmacKey(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHmacKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHmacKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHmacKey: %w", err) + } + return oldValue.HmacKey, nil +} + +// ResetHmacKey resets all changes to the "hmac_key" field. +func (m *AuthRequestMutation) ResetHmacKey() { + m.hmac_key = nil +} + +// Where appends a list of predicates to the AuthRequestMutation builder. +func (m *AuthRequestMutation) Where(ps ...predicate.AuthRequest) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AuthRequestMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AuthRequestMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AuthRequest, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AuthRequestMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AuthRequestMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AuthRequest). +func (m *AuthRequestMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields().
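+
+// For instance (an illustrative sketch, not generated code), a
+// schema-agnostic audit hook can walk every staged change by combining
+// Fields with the Field accessor below:
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			fmt.Printf("%s.%s = %v\n", m.Type(), name, v)
+//		}
+//	}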
+func (m *AuthRequestMutation) Fields() []string { + fields := make([]string, 0, 20) + if m.client_id != nil { + fields = append(fields, authrequest.FieldClientID) + } + if m.scopes != nil { + fields = append(fields, authrequest.FieldScopes) + } + if m.response_types != nil { + fields = append(fields, authrequest.FieldResponseTypes) + } + if m.redirect_uri != nil { + fields = append(fields, authrequest.FieldRedirectURI) + } + if m.nonce != nil { + fields = append(fields, authrequest.FieldNonce) + } + if m.state != nil { + fields = append(fields, authrequest.FieldState) + } + if m.force_approval_prompt != nil { + fields = append(fields, authrequest.FieldForceApprovalPrompt) + } + if m.logged_in != nil { + fields = append(fields, authrequest.FieldLoggedIn) + } + if m.claims_user_id != nil { + fields = append(fields, authrequest.FieldClaimsUserID) + } + if m.claims_username != nil { + fields = append(fields, authrequest.FieldClaimsUsername) + } + if m.claims_email != nil { + fields = append(fields, authrequest.FieldClaimsEmail) + } + if m.claims_email_verified != nil { + fields = append(fields, authrequest.FieldClaimsEmailVerified) + } + if m.claims_groups != nil { + fields = append(fields, authrequest.FieldClaimsGroups) + } + if m.claims_preferred_username != nil { + fields = append(fields, authrequest.FieldClaimsPreferredUsername) + } + if m.connector_id != nil { + fields = append(fields, authrequest.FieldConnectorID) + } + if m.connector_data != nil { + fields = append(fields, authrequest.FieldConnectorData) + } + if m.expiry != nil { + fields = append(fields, authrequest.FieldExpiry) + } + if m.code_challenge != nil { + fields = append(fields, authrequest.FieldCodeChallenge) + } + if m.code_challenge_method != nil { + fields = append(fields, authrequest.FieldCodeChallengeMethod) + } + if m.hmac_key != nil { + fields = append(fields, authrequest.FieldHmacKey) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AuthRequestMutation) Field(name string) (ent.Value, bool) { + switch name { + case authrequest.FieldClientID: + return m.ClientID() + case authrequest.FieldScopes: + return m.Scopes() + case authrequest.FieldResponseTypes: + return m.ResponseTypes() + case authrequest.FieldRedirectURI: + return m.RedirectURI() + case authrequest.FieldNonce: + return m.Nonce() + case authrequest.FieldState: + return m.State() + case authrequest.FieldForceApprovalPrompt: + return m.ForceApprovalPrompt() + case authrequest.FieldLoggedIn: + return m.LoggedIn() + case authrequest.FieldClaimsUserID: + return m.ClaimsUserID() + case authrequest.FieldClaimsUsername: + return m.ClaimsUsername() + case authrequest.FieldClaimsEmail: + return m.ClaimsEmail() + case authrequest.FieldClaimsEmailVerified: + return m.ClaimsEmailVerified() + case authrequest.FieldClaimsGroups: + return m.ClaimsGroups() + case authrequest.FieldClaimsPreferredUsername: + return m.ClaimsPreferredUsername() + case authrequest.FieldConnectorID: + return m.ConnectorID() + case authrequest.FieldConnectorData: + return m.ConnectorData() + case authrequest.FieldExpiry: + return m.Expiry() + case authrequest.FieldCodeChallenge: + return m.CodeChallenge() + case authrequest.FieldCodeChallengeMethod: + return m.CodeChallengeMethod() + case authrequest.FieldHmacKey: + return m.HmacKey() + } + return nil, false +} + +// OldField returns the old value of the field from the database. 
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AuthRequestMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case authrequest.FieldClientID: + return m.OldClientID(ctx) + case authrequest.FieldScopes: + return m.OldScopes(ctx) + case authrequest.FieldResponseTypes: + return m.OldResponseTypes(ctx) + case authrequest.FieldRedirectURI: + return m.OldRedirectURI(ctx) + case authrequest.FieldNonce: + return m.OldNonce(ctx) + case authrequest.FieldState: + return m.OldState(ctx) + case authrequest.FieldForceApprovalPrompt: + return m.OldForceApprovalPrompt(ctx) + case authrequest.FieldLoggedIn: + return m.OldLoggedIn(ctx) + case authrequest.FieldClaimsUserID: + return m.OldClaimsUserID(ctx) + case authrequest.FieldClaimsUsername: + return m.OldClaimsUsername(ctx) + case authrequest.FieldClaimsEmail: + return m.OldClaimsEmail(ctx) + case authrequest.FieldClaimsEmailVerified: + return m.OldClaimsEmailVerified(ctx) + case authrequest.FieldClaimsGroups: + return m.OldClaimsGroups(ctx) + case authrequest.FieldClaimsPreferredUsername: + return m.OldClaimsPreferredUsername(ctx) + case authrequest.FieldConnectorID: + return m.OldConnectorID(ctx) + case authrequest.FieldConnectorData: + return m.OldConnectorData(ctx) + case authrequest.FieldExpiry: + return m.OldExpiry(ctx) + case authrequest.FieldCodeChallenge: + return m.OldCodeChallenge(ctx) + case authrequest.FieldCodeChallengeMethod: + return m.OldCodeChallengeMethod(ctx) + case authrequest.FieldHmacKey: + return m.OldHmacKey(ctx) + } + return nil, fmt.Errorf("unknown AuthRequest field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
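+
+// For example (an illustrative sketch, not generated code; "example-app" is
+// an arbitrary value):
+//
+//	// a value of the wrong type is reported as an error, not a panic:
+//	// "unexpected type int for field client_id"
+//	if err := m.SetField(authrequest.FieldClientID, "example-app"); err != nil {
+//		fmt.Println(err)
+//	}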
+func (m *AuthRequestMutation) SetField(name string, value ent.Value) error { + switch name { + case authrequest.FieldClientID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClientID(v) + return nil + case authrequest.FieldScopes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScopes(v) + return nil + case authrequest.FieldResponseTypes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResponseTypes(v) + return nil + case authrequest.FieldRedirectURI: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedirectURI(v) + return nil + case authrequest.FieldNonce: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNonce(v) + return nil + case authrequest.FieldState: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetState(v) + return nil + case authrequest.FieldForceApprovalPrompt: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetForceApprovalPrompt(v) + return nil + case authrequest.FieldLoggedIn: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLoggedIn(v) + return nil + case authrequest.FieldClaimsUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUserID(v) + return nil + case authrequest.FieldClaimsUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUsername(v) + return nil + case authrequest.FieldClaimsEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmail(v) + return nil + case authrequest.FieldClaimsEmailVerified: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmailVerified(v) + return nil + case authrequest.FieldClaimsGroups: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsGroups(v) + return nil + case authrequest.FieldClaimsPreferredUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsPreferredUsername(v) + return nil + case authrequest.FieldConnectorID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorID(v) + return nil + case authrequest.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + case authrequest.FieldExpiry: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiry(v) + return nil + case authrequest.FieldCodeChallenge: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallenge(v) + return nil + case authrequest.FieldCodeChallengeMethod: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetCodeChallengeMethod(v) + return nil + case authrequest.FieldHmacKey: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHmacKey(v) + return nil + } + return fmt.Errorf("unknown AuthRequest field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AuthRequestMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AuthRequestMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AuthRequestMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown AuthRequest numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AuthRequestMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(authrequest.FieldScopes) { + fields = append(fields, authrequest.FieldScopes) + } + if m.FieldCleared(authrequest.FieldResponseTypes) { + fields = append(fields, authrequest.FieldResponseTypes) + } + if m.FieldCleared(authrequest.FieldClaimsGroups) { + fields = append(fields, authrequest.FieldClaimsGroups) + } + if m.FieldCleared(authrequest.FieldConnectorData) { + fields = append(fields, authrequest.FieldConnectorData) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AuthRequestMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AuthRequestMutation) ClearField(name string) error { + switch name { + case authrequest.FieldScopes: + m.ClearScopes() + return nil + case authrequest.FieldResponseTypes: + m.ClearResponseTypes() + return nil + case authrequest.FieldClaimsGroups: + m.ClearClaimsGroups() + return nil + case authrequest.FieldConnectorData: + m.ClearConnectorData() + return nil + } + return fmt.Errorf("unknown AuthRequest nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
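+
+// For example (an illustrative sketch, not generated code), clearing by
+// name routes to the typed clearer and stays visible until the field is
+// reset:
+//
+//	m.ClearField(authrequest.FieldScopes)   // same as m.ClearScopes()
+//	m.FieldCleared(authrequest.FieldScopes) // reports true
+//	m.ResetField(authrequest.FieldScopes)   // same as m.ResetScopes()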
+func (m *AuthRequestMutation) ResetField(name string) error { + switch name { + case authrequest.FieldClientID: + m.ResetClientID() + return nil + case authrequest.FieldScopes: + m.ResetScopes() + return nil + case authrequest.FieldResponseTypes: + m.ResetResponseTypes() + return nil + case authrequest.FieldRedirectURI: + m.ResetRedirectURI() + return nil + case authrequest.FieldNonce: + m.ResetNonce() + return nil + case authrequest.FieldState: + m.ResetState() + return nil + case authrequest.FieldForceApprovalPrompt: + m.ResetForceApprovalPrompt() + return nil + case authrequest.FieldLoggedIn: + m.ResetLoggedIn() + return nil + case authrequest.FieldClaimsUserID: + m.ResetClaimsUserID() + return nil + case authrequest.FieldClaimsUsername: + m.ResetClaimsUsername() + return nil + case authrequest.FieldClaimsEmail: + m.ResetClaimsEmail() + return nil + case authrequest.FieldClaimsEmailVerified: + m.ResetClaimsEmailVerified() + return nil + case authrequest.FieldClaimsGroups: + m.ResetClaimsGroups() + return nil + case authrequest.FieldClaimsPreferredUsername: + m.ResetClaimsPreferredUsername() + return nil + case authrequest.FieldConnectorID: + m.ResetConnectorID() + return nil + case authrequest.FieldConnectorData: + m.ResetConnectorData() + return nil + case authrequest.FieldExpiry: + m.ResetExpiry() + return nil + case authrequest.FieldCodeChallenge: + m.ResetCodeChallenge() + return nil + case authrequest.FieldCodeChallengeMethod: + m.ResetCodeChallengeMethod() + return nil + case authrequest.FieldHmacKey: + m.ResetHmacKey() + return nil + } + return fmt.Errorf("unknown AuthRequest field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AuthRequestMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AuthRequestMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AuthRequestMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AuthRequestMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AuthRequestMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AuthRequestMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AuthRequestMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown AuthRequest unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AuthRequestMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown AuthRequest edge %s", name) +} + +// ConnectorMutation represents an operation that mutates the Connector nodes in the graph. 
+type ConnectorMutation struct { + config + op Op + typ string + id *string + _type *string + name *string + resource_version *string + _config *[]byte + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Connector, error) + predicates []predicate.Connector +} + +var _ ent.Mutation = (*ConnectorMutation)(nil) + +// connectorOption allows management of the mutation configuration using functional options. +type connectorOption func(*ConnectorMutation) + +// newConnectorMutation creates new mutation for the Connector entity. +func newConnectorMutation(c config, op Op, opts ...connectorOption) *ConnectorMutation { + m := &ConnectorMutation{ + config: c, + op: op, + typ: TypeConnector, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withConnectorID sets the ID field of the mutation. +func withConnectorID(id string) connectorOption { + return func(m *ConnectorMutation) { + var ( + err error + once sync.Once + value *Connector + ) + m.oldValue = func(ctx context.Context) (*Connector, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Connector.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withConnector sets the old Connector of the mutation. +func withConnector(node *Connector) connectorOption { + return func(m *ConnectorMutation) { + m.oldValue = func(context.Context) (*Connector, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ConnectorMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ConnectorMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Connector entities. +func (m *ConnectorMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ConnectorMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or deleted by the mutation. +func (m *ConnectorMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Connector.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetType sets the "type" field. +func (m *ConnectorMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation.
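+
+// The column here is named "type": the struct field above is spelled _type
+// because type is a Go keyword (likewise _config, which would collide with
+// the embedded config), and the getter is GetType because Type() already
+// reports the mutation's node type. A minimal sketch (not generated code;
+// "github" is an arbitrary value):
+//
+//	m.SetType("github")
+//	v, ok := m.GetType()
+//	fmt.Println(v, ok, m.Type()) // github true Connector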
+func (m *ConnectorMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ConnectorMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *ConnectorMutation) ResetType() { + m._type = nil +} + +// SetName sets the "name" field. +func (m *ConnectorMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ConnectorMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ConnectorMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ConnectorMutation) ResetName() { + m.name = nil +} + +// SetResourceVersion sets the "resource_version" field. +func (m *ConnectorMutation) SetResourceVersion(s string) { + m.resource_version = &s +} + +// ResourceVersion returns the value of the "resource_version" field in the mutation. +func (m *ConnectorMutation) ResourceVersion() (r string, exists bool) { + v := m.resource_version + if v == nil { + return + } + return *v, true +} + +// OldResourceVersion returns the old "resource_version" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ConnectorMutation) OldResourceVersion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResourceVersion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResourceVersion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResourceVersion: %w", err) + } + return oldValue.ResourceVersion, nil +} + +// ResetResourceVersion resets all changes to the "resource_version" field. +func (m *ConnectorMutation) ResetResourceVersion() { + m.resource_version = nil +} + +// SetConfig sets the "config" field. 
+func (m *ConnectorMutation) SetConfig(b []byte) { + m._config = &b +} + +// Config returns the value of the "config" field in the mutation. +func (m *ConnectorMutation) Config() (r []byte, exists bool) { + v := m._config + if v == nil { + return + } + return *v, true +} + +// OldConfig returns the old "config" field's value of the Connector entity. +// If the Connector object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ConnectorMutation) OldConfig(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConfig is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConfig requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConfig: %w", err) + } + return oldValue.Config, nil +} + +// ResetConfig resets all changes to the "config" field. +func (m *ConnectorMutation) ResetConfig() { + m._config = nil +} + +// Where appends a list of predicates to the ConnectorMutation builder. +func (m *ConnectorMutation) Where(ps ...predicate.Connector) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ConnectorMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ConnectorMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Connector, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ConnectorMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ConnectorMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Connector). +func (m *ConnectorMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ConnectorMutation) Fields() []string { + fields := make([]string, 0, 4) + if m._type != nil { + fields = append(fields, connector.FieldType) + } + if m.name != nil { + fields = append(fields, connector.FieldName) + } + if m.resource_version != nil { + fields = append(fields, connector.FieldResourceVersion) + } + if m._config != nil { + fields = append(fields, connector.FieldConfig) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ConnectorMutation) Field(name string) (ent.Value, bool) { + switch name { + case connector.FieldType: + return m.GetType() + case connector.FieldName: + return m.Name() + case connector.FieldResourceVersion: + return m.ResourceVersion() + case connector.FieldConfig: + return m.Config() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed.
+func (m *ConnectorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case connector.FieldType: + return m.OldType(ctx) + case connector.FieldName: + return m.OldName(ctx) + case connector.FieldResourceVersion: + return m.OldResourceVersion(ctx) + case connector.FieldConfig: + return m.OldConfig(ctx) + } + return nil, fmt.Errorf("unknown Connector field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ConnectorMutation) SetField(name string, value ent.Value) error { + switch name { + case connector.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case connector.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case connector.FieldResourceVersion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResourceVersion(v) + return nil + case connector.FieldConfig: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConfig(v) + return nil + } + return fmt.Errorf("unknown Connector field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ConnectorMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ConnectorMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ConnectorMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Connector numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ConnectorMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ConnectorMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ConnectorMutation) ClearField(name string) error { + return fmt.Errorf("unknown Connector nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *ConnectorMutation) ResetField(name string) error { + switch name { + case connector.FieldType: + m.ResetType() + return nil + case connector.FieldName: + m.ResetName() + return nil + case connector.FieldResourceVersion: + m.ResetResourceVersion() + return nil + case connector.FieldConfig: + m.ResetConfig() + return nil + } + return fmt.Errorf("unknown Connector field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *ConnectorMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ConnectorMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ConnectorMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ConnectorMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ConnectorMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ConnectorMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ConnectorMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Connector unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ConnectorMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Connector edge %s", name) +} + +// DeviceRequestMutation represents an operation that mutates the DeviceRequest nodes in the graph. +type DeviceRequestMutation struct { + config + op Op + typ string + id *int + user_code *string + device_code *string + client_id *string + client_secret *string + scopes *[]string + appendscopes []string + expiry *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*DeviceRequest, error) + predicates []predicate.DeviceRequest +} + +var _ ent.Mutation = (*DeviceRequestMutation)(nil) + +// devicerequestOption allows management of the mutation configuration using functional options. +type devicerequestOption func(*DeviceRequestMutation) + +// newDeviceRequestMutation creates new mutation for the DeviceRequest entity. +func newDeviceRequestMutation(c config, op Op, opts ...devicerequestOption) *DeviceRequestMutation { + m := &DeviceRequestMutation{ + config: c, + op: op, + typ: TypeDeviceRequest, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDeviceRequestID sets the ID field of the mutation. +func withDeviceRequestID(id int) devicerequestOption { + return func(m *DeviceRequestMutation) { + var ( + err error + once sync.Once + value *DeviceRequest + ) + m.oldValue = func(ctx context.Context) (*DeviceRequest, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().DeviceRequest.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDeviceRequest sets the old DeviceRequest of the mutation. +func withDeviceRequest(node *DeviceRequest) devicerequestOption { + return func(m *DeviceRequestMutation) { + m.oldValue = func(context.Context) (*DeviceRequest, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. 
If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DeviceRequestMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DeviceRequestMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("db: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DeviceRequestMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or deleted by the mutation. +func (m *DeviceRequestMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().DeviceRequest.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetUserCode sets the "user_code" field. +func (m *DeviceRequestMutation) SetUserCode(s string) { + m.user_code = &s +} + +// UserCode returns the value of the "user_code" field in the mutation. +func (m *DeviceRequestMutation) UserCode() (r string, exists bool) { + v := m.user_code + if v == nil { + return + } + return *v, true +} + +// OldUserCode returns the old "user_code" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceRequestMutation) OldUserCode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserCode: %w", err) + } + return oldValue.UserCode, nil +} + +// ResetUserCode resets all changes to the "user_code" field. +func (m *DeviceRequestMutation) ResetUserCode() { + m.user_code = nil +} + +// SetDeviceCode sets the "device_code" field. +func (m *DeviceRequestMutation) SetDeviceCode(s string) { + m.device_code = &s +} + +// DeviceCode returns the value of the "device_code" field in the mutation. +func (m *DeviceRequestMutation) DeviceCode() (r string, exists bool) { + v := m.device_code + if v == nil { + return + } + return *v, true +} + +// OldDeviceCode returns the old "device_code" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
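+
+// For example (an illustrative sketch, not generated code):
+//
+//	// Old* getters fail unless m.Op().Is(OpUpdateOne) and an ID is set:
+//	if old, err := m.OldDeviceCode(ctx); err == nil {
+//		fmt.Println("device_code was", old)
+//	}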
+func (m *DeviceRequestMutation) OldDeviceCode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeviceCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err) + } + return oldValue.DeviceCode, nil +} + +// ResetDeviceCode resets all changes to the "device_code" field. +func (m *DeviceRequestMutation) ResetDeviceCode() { + m.device_code = nil +} + +// SetClientID sets the "client_id" field. +func (m *DeviceRequestMutation) SetClientID(s string) { + m.client_id = &s +} + +// ClientID returns the value of the "client_id" field in the mutation. +func (m *DeviceRequestMutation) ClientID() (r string, exists bool) { + v := m.client_id + if v == nil { + return + } + return *v, true +} + +// OldClientID returns the old "client_id" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceRequestMutation) OldClientID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClientID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClientID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClientID: %w", err) + } + return oldValue.ClientID, nil +} + +// ResetClientID resets all changes to the "client_id" field. +func (m *DeviceRequestMutation) ResetClientID() { + m.client_id = nil +} + +// SetClientSecret sets the "client_secret" field. +func (m *DeviceRequestMutation) SetClientSecret(s string) { + m.client_secret = &s +} + +// ClientSecret returns the value of the "client_secret" field in the mutation. +func (m *DeviceRequestMutation) ClientSecret() (r string, exists bool) { + v := m.client_secret + if v == nil { + return + } + return *v, true +} + +// OldClientSecret returns the old "client_secret" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceRequestMutation) OldClientSecret(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClientSecret is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClientSecret requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClientSecret: %w", err) + } + return oldValue.ClientSecret, nil +} + +// ResetClientSecret resets all changes to the "client_secret" field. +func (m *DeviceRequestMutation) ResetClientSecret() { + m.client_secret = nil +} + +// SetScopes sets the "scopes" field. +func (m *DeviceRequestMutation) SetScopes(s []string) { + m.scopes = &s + m.appendscopes = nil +} + +// Scopes returns the value of the "scopes" field in the mutation. 
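+
+// A related sketch (not generated code; the scope strings are arbitrary):
+// SetScopes replaces the staged list and discards pending appends, while
+// AppendScopes accumulates values for the persistence layer to apply on
+// save:
+//
+//	m.SetScopes([]string{"openid"})
+//	m.AppendScopes([]string{"email", "profile"})
+//	appended, _ := m.AppendedScopes() // ["email", "profile"]
+//	fmt.Println(appended)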
+func (m *DeviceRequestMutation) Scopes() (r []string, exists bool) { + v := m.scopes + if v == nil { + return + } + return *v, true +} + +// OldScopes returns the old "scopes" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceRequestMutation) OldScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScopes: %w", err) + } + return oldValue.Scopes, nil +} + +// AppendScopes adds s to the "scopes" field. +func (m *DeviceRequestMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) +} + +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. +func (m *DeviceRequestMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false + } + return m.appendscopes, true +} + +// ClearScopes clears the value of the "scopes" field. +func (m *DeviceRequestMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[devicerequest.FieldScopes] = struct{}{} +} + +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *DeviceRequestMutation) ScopesCleared() bool { + _, ok := m.clearedFields[devicerequest.FieldScopes] + return ok +} + +// ResetScopes resets all changes to the "scopes" field. +func (m *DeviceRequestMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, devicerequest.FieldScopes) +} + +// SetExpiry sets the "expiry" field. +func (m *DeviceRequestMutation) SetExpiry(t time.Time) { + m.expiry = &t +} + +// Expiry returns the value of the "expiry" field in the mutation. +func (m *DeviceRequestMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry + if v == nil { + return + } + return *v, true +} + +// OldExpiry returns the old "expiry" field's value of the DeviceRequest entity. +// If the DeviceRequest object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceRequestMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiry is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiry requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiry: %w", err) + } + return oldValue.Expiry, nil +} + +// ResetExpiry resets all changes to the "expiry" field. +func (m *DeviceRequestMutation) ResetExpiry() { + m.expiry = nil +} + +// Where appends a list of predicates to the DeviceRequestMutation builder. +func (m *DeviceRequestMutation) Where(ps ...predicate.DeviceRequest) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the DeviceRequestMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package.
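+
+// For example (an illustrative sketch, not generated code; "example-app" is
+// an arbitrary value), a generic hook can filter on a column without
+// importing the typed predicate package:
+//
+//	m.WhereP(func(s *sql.Selector) {
+//		s.Where(sql.EQ(s.C(devicerequest.FieldClientID), "example-app"))
+//	})
+//	ids, _ := m.IDs(ctx) // rows the bulk operation would touch
+//	fmt.Println(ids)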
+func (m *DeviceRequestMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DeviceRequest, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *DeviceRequestMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *DeviceRequestMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (DeviceRequest). +func (m *DeviceRequestMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DeviceRequestMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.user_code != nil { + fields = append(fields, devicerequest.FieldUserCode) + } + if m.device_code != nil { + fields = append(fields, devicerequest.FieldDeviceCode) + } + if m.client_id != nil { + fields = append(fields, devicerequest.FieldClientID) + } + if m.client_secret != nil { + fields = append(fields, devicerequest.FieldClientSecret) + } + if m.scopes != nil { + fields = append(fields, devicerequest.FieldScopes) + } + if m.expiry != nil { + fields = append(fields, devicerequest.FieldExpiry) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DeviceRequestMutation) Field(name string) (ent.Value, bool) { + switch name { + case devicerequest.FieldUserCode: + return m.UserCode() + case devicerequest.FieldDeviceCode: + return m.DeviceCode() + case devicerequest.FieldClientID: + return m.ClientID() + case devicerequest.FieldClientSecret: + return m.ClientSecret() + case devicerequest.FieldScopes: + return m.Scopes() + case devicerequest.FieldExpiry: + return m.Expiry() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DeviceRequestMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case devicerequest.FieldUserCode: + return m.OldUserCode(ctx) + case devicerequest.FieldDeviceCode: + return m.OldDeviceCode(ctx) + case devicerequest.FieldClientID: + return m.OldClientID(ctx) + case devicerequest.FieldClientSecret: + return m.OldClientSecret(ctx) + case devicerequest.FieldScopes: + return m.OldScopes(ctx) + case devicerequest.FieldExpiry: + return m.OldExpiry(ctx) + } + return nil, fmt.Errorf("unknown DeviceRequest field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *DeviceRequestMutation) SetField(name string, value ent.Value) error { + switch name { + case devicerequest.FieldUserCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserCode(v) + return nil + case devicerequest.FieldDeviceCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeviceCode(v) + return nil + case devicerequest.FieldClientID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClientID(v) + return nil + case devicerequest.FieldClientSecret: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClientSecret(v) + return nil + case devicerequest.FieldScopes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScopes(v) + return nil + case devicerequest.FieldExpiry: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiry(v) + return nil + } + return fmt.Errorf("unknown DeviceRequest field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DeviceRequestMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DeviceRequestMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DeviceRequestMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown DeviceRequest numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DeviceRequestMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(devicerequest.FieldScopes) { + fields = append(fields, devicerequest.FieldScopes) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DeviceRequestMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DeviceRequestMutation) ClearField(name string) error { + switch name { + case devicerequest.FieldScopes: + m.ClearScopes() + return nil + } + return fmt.Errorf("unknown DeviceRequest nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *DeviceRequestMutation) ResetField(name string) error { + switch name { + case devicerequest.FieldUserCode: + m.ResetUserCode() + return nil + case devicerequest.FieldDeviceCode: + m.ResetDeviceCode() + return nil + case devicerequest.FieldClientID: + m.ResetClientID() + return nil + case devicerequest.FieldClientSecret: + m.ResetClientSecret() + return nil + case devicerequest.FieldScopes: + m.ResetScopes() + return nil + case devicerequest.FieldExpiry: + m.ResetExpiry() + return nil + } + return fmt.Errorf("unknown DeviceRequest field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DeviceRequestMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DeviceRequestMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DeviceRequestMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DeviceRequestMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DeviceRequestMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DeviceRequestMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DeviceRequestMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown DeviceRequest unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DeviceRequestMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown DeviceRequest edge %s", name) +} + +// DeviceTokenMutation represents an operation that mutates the DeviceToken nodes in the graph. +type DeviceTokenMutation struct { + config + op Op + typ string + id *int + device_code *string + status *string + token *[]byte + expiry *time.Time + last_request *time.Time + poll_interval *int + addpoll_interval *int + code_challenge *string + code_challenge_method *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*DeviceToken, error) + predicates []predicate.DeviceToken +} + +var _ ent.Mutation = (*DeviceTokenMutation)(nil) + +// devicetokenOption allows management of the mutation configuration using functional options. +type devicetokenOption func(*DeviceTokenMutation) + +// newDeviceTokenMutation creates new mutation for the DeviceToken entity. +func newDeviceTokenMutation(c config, op Op, opts ...devicetokenOption) *DeviceTokenMutation { + m := &DeviceTokenMutation{ + config: c, + op: op, + typ: TypeDeviceToken, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDeviceTokenID sets the ID field of the mutation. 
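+
+// A sketch of the intent (not generated code; c is a config value and 42 an
+// arbitrary id): the id variant defers a single sync.Once-guarded lookup,
+// while withDeviceToken below serves Old* values from the node already in
+// memory:
+//
+//	m := newDeviceTokenMutation(c, OpUpdateOne, withDeviceTokenID(42))
+//	st, err := m.OldStatus(ctx) // first call performs the Get; later calls reuse it
+//	fmt.Println(st, err)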
+func withDeviceTokenID(id int) devicetokenOption {
+ return func(m *DeviceTokenMutation) {
+ var (
+ err error
+ once sync.Once
+ value *DeviceToken
+ )
+ m.oldValue = func(ctx context.Context) (*DeviceToken, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().DeviceToken.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withDeviceToken sets the old DeviceToken of the mutation.
+func withDeviceToken(node *DeviceToken) devicetokenOption {
+ return func(m *DeviceTokenMutation) {
+ m.oldValue = func(context.Context) (*DeviceToken, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m DeviceTokenMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m DeviceTokenMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("db: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *DeviceTokenMutation) ID() (id int, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DeviceTokenMutation) IDs(ctx context.Context) ([]int, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []int{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().DeviceToken.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetDeviceCode sets the "device_code" field.
+func (m *DeviceTokenMutation) SetDeviceCode(s string) {
+ m.device_code = &s
+}
+
+// DeviceCode returns the value of the "device_code" field in the mutation.
+func (m *DeviceTokenMutation) DeviceCode() (r string, exists bool) {
+ v := m.device_code
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldDeviceCode returns the old "device_code" field's value of the DeviceToken entity.
+// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceTokenMutation) OldDeviceCode(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldDeviceCode is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldDeviceCode requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldDeviceCode: %w", err)
+ }
+ return oldValue.DeviceCode, nil
+}
+
+// ResetDeviceCode resets all changes to the "device_code" field.
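The oldValue closure installed by withDeviceTokenID memoizes a single database fetch behind sync.Once: the first caller pays for the query, and every later caller gets the cached value or error. A generic sketch of that idiom (illustrative, assumes Go 1.18+):

package example

import (
	"context"
	"sync"
)

// memoize runs fetch at most once; both the value and the error are cached,
// matching the once.Do dance in withDeviceTokenID.
func memoize[T any](fetch func(context.Context) (T, error)) func(context.Context) (T, error) {
	var (
		once  sync.Once
		value T
		err   error
	)
	return func(ctx context.Context) (T, error) {
		once.Do(func() { value, err = fetch(ctx) })
		return value, err
	}
}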
+func (m *DeviceTokenMutation) ResetDeviceCode() { + m.device_code = nil +} + +// SetStatus sets the "status" field. +func (m *DeviceTokenMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *DeviceTokenMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *DeviceTokenMutation) ResetStatus() { + m.status = nil +} + +// SetToken sets the "token" field. +func (m *DeviceTokenMutation) SetToken(b []byte) { + m.token = &b +} + +// Token returns the value of the "token" field in the mutation. +func (m *DeviceTokenMutation) Token() (r []byte, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldToken(ctx context.Context) (v *[]byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil +} + +// ClearToken clears the value of the "token" field. +func (m *DeviceTokenMutation) ClearToken() { + m.token = nil + m.clearedFields[devicetoken.FieldToken] = struct{}{} +} + +// TokenCleared returns if the "token" field was cleared in this mutation. +func (m *DeviceTokenMutation) TokenCleared() bool { + _, ok := m.clearedFields[devicetoken.FieldToken] + return ok +} + +// ResetToken resets all changes to the "token" field. +func (m *DeviceTokenMutation) ResetToken() { + m.token = nil + delete(m.clearedFields, devicetoken.FieldToken) +} + +// SetExpiry sets the "expiry" field. +func (m *DeviceTokenMutation) SetExpiry(t time.Time) { + m.expiry = &t +} + +// Expiry returns the value of the "expiry" field in the mutation. +func (m *DeviceTokenMutation) Expiry() (r time.Time, exists bool) { + v := m.expiry + if v == nil { + return + } + return *v, true +} + +// OldExpiry returns the old "expiry" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *DeviceTokenMutation) OldExpiry(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiry is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiry requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiry: %w", err) + } + return oldValue.Expiry, nil +} + +// ResetExpiry resets all changes to the "expiry" field. +func (m *DeviceTokenMutation) ResetExpiry() { + m.expiry = nil +} + +// SetLastRequest sets the "last_request" field. +func (m *DeviceTokenMutation) SetLastRequest(t time.Time) { + m.last_request = &t +} + +// LastRequest returns the value of the "last_request" field in the mutation. +func (m *DeviceTokenMutation) LastRequest() (r time.Time, exists bool) { + v := m.last_request + if v == nil { + return + } + return *v, true +} + +// OldLastRequest returns the old "last_request" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldLastRequest(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastRequest is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastRequest requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastRequest: %w", err) + } + return oldValue.LastRequest, nil +} + +// ResetLastRequest resets all changes to the "last_request" field. +func (m *DeviceTokenMutation) ResetLastRequest() { + m.last_request = nil +} + +// SetPollInterval sets the "poll_interval" field. +func (m *DeviceTokenMutation) SetPollInterval(i int) { + m.poll_interval = &i + m.addpoll_interval = nil +} + +// PollInterval returns the value of the "poll_interval" field in the mutation. +func (m *DeviceTokenMutation) PollInterval() (r int, exists bool) { + v := m.poll_interval + if v == nil { + return + } + return *v, true +} + +// OldPollInterval returns the old "poll_interval" field's value of the DeviceToken entity. +// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DeviceTokenMutation) OldPollInterval(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPollInterval is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPollInterval requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPollInterval: %w", err) + } + return oldValue.PollInterval, nil +} + +// AddPollInterval adds i to the "poll_interval" field. +func (m *DeviceTokenMutation) AddPollInterval(i int) { + if m.addpoll_interval != nil { + *m.addpoll_interval += i + } else { + m.addpoll_interval = &i + } +} + +// AddedPollInterval returns the value that was added to the "poll_interval" field in this mutation. 
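Note the asymmetry above: SetPollInterval discards any pending delta, while repeated AddPollInterval calls fold into a single delta that is flushed as one arithmetic UPDATE. A hedged sketch through the generic ent.Mutation surface (how m is obtained is out of scope here):

package example

import "entgo.io/ent"

// bumpPollInterval adds 5 and then 2 to "poll_interval". The deltas coalesce:
// AddedField("poll_interval") afterwards reports 7, applied as one UPDATE.
func bumpPollInterval(m ent.Mutation) error {
	if err := m.AddField("poll_interval", 5); err != nil {
		return err
	}
	return m.AddField("poll_interval", 2)
}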
+func (m *DeviceTokenMutation) AddedPollInterval() (r int, exists bool) {
+ v := m.addpoll_interval
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetPollInterval resets all changes to the "poll_interval" field.
+func (m *DeviceTokenMutation) ResetPollInterval() {
+ m.poll_interval = nil
+ m.addpoll_interval = nil
+}
+
+// SetCodeChallenge sets the "code_challenge" field.
+func (m *DeviceTokenMutation) SetCodeChallenge(s string) {
+ m.code_challenge = &s
+}
+
+// CodeChallenge returns the value of the "code_challenge" field in the mutation.
+func (m *DeviceTokenMutation) CodeChallenge() (r string, exists bool) {
+ v := m.code_challenge
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCodeChallenge returns the old "code_challenge" field's value of the DeviceToken entity.
+// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceTokenMutation) OldCodeChallenge(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCodeChallenge is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCodeChallenge requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCodeChallenge: %w", err)
+ }
+ return oldValue.CodeChallenge, nil
+}
+
+// ResetCodeChallenge resets all changes to the "code_challenge" field.
+func (m *DeviceTokenMutation) ResetCodeChallenge() {
+ m.code_challenge = nil
+}
+
+// SetCodeChallengeMethod sets the "code_challenge_method" field.
+func (m *DeviceTokenMutation) SetCodeChallengeMethod(s string) {
+ m.code_challenge_method = &s
+}
+
+// CodeChallengeMethod returns the value of the "code_challenge_method" field in the mutation.
+func (m *DeviceTokenMutation) CodeChallengeMethod() (r string, exists bool) {
+ v := m.code_challenge_method
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCodeChallengeMethod returns the old "code_challenge_method" field's value of the DeviceToken entity.
+// If the DeviceToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DeviceTokenMutation) OldCodeChallengeMethod(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCodeChallengeMethod is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCodeChallengeMethod requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCodeChallengeMethod: %w", err)
+ }
+ return oldValue.CodeChallengeMethod, nil
+}
+
+// ResetCodeChallengeMethod resets all changes to the "code_challenge_method" field.
+func (m *DeviceTokenMutation) ResetCodeChallengeMethod() {
+ m.code_challenge_method = nil
+}
+
+// Where appends a list of predicates to the DeviceTokenMutation builder.
+func (m *DeviceTokenMutation) Where(ps ...predicate.DeviceToken) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the DeviceTokenMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *DeviceTokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DeviceToken, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *DeviceTokenMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *DeviceTokenMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (DeviceToken). +func (m *DeviceTokenMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DeviceTokenMutation) Fields() []string { + fields := make([]string, 0, 8) + if m.device_code != nil { + fields = append(fields, devicetoken.FieldDeviceCode) + } + if m.status != nil { + fields = append(fields, devicetoken.FieldStatus) + } + if m.token != nil { + fields = append(fields, devicetoken.FieldToken) + } + if m.expiry != nil { + fields = append(fields, devicetoken.FieldExpiry) + } + if m.last_request != nil { + fields = append(fields, devicetoken.FieldLastRequest) + } + if m.poll_interval != nil { + fields = append(fields, devicetoken.FieldPollInterval) + } + if m.code_challenge != nil { + fields = append(fields, devicetoken.FieldCodeChallenge) + } + if m.code_challenge_method != nil { + fields = append(fields, devicetoken.FieldCodeChallengeMethod) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DeviceTokenMutation) Field(name string) (ent.Value, bool) { + switch name { + case devicetoken.FieldDeviceCode: + return m.DeviceCode() + case devicetoken.FieldStatus: + return m.Status() + case devicetoken.FieldToken: + return m.Token() + case devicetoken.FieldExpiry: + return m.Expiry() + case devicetoken.FieldLastRequest: + return m.LastRequest() + case devicetoken.FieldPollInterval: + return m.PollInterval() + case devicetoken.FieldCodeChallenge: + return m.CodeChallenge() + case devicetoken.FieldCodeChallengeMethod: + return m.CodeChallengeMethod() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DeviceTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case devicetoken.FieldDeviceCode: + return m.OldDeviceCode(ctx) + case devicetoken.FieldStatus: + return m.OldStatus(ctx) + case devicetoken.FieldToken: + return m.OldToken(ctx) + case devicetoken.FieldExpiry: + return m.OldExpiry(ctx) + case devicetoken.FieldLastRequest: + return m.OldLastRequest(ctx) + case devicetoken.FieldPollInterval: + return m.OldPollInterval(ctx) + case devicetoken.FieldCodeChallenge: + return m.OldCodeChallenge(ctx) + case devicetoken.FieldCodeChallengeMethod: + return m.OldCodeChallengeMethod(ctx) + } + return nil, fmt.Errorf("unknown DeviceToken field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
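WhereP above converts raw selector functions into typed predicates, which is the escape hatch for conditions not covered by the generated predicate package. A hedged usage sketch; the "status" column name mirrors devicetoken.FieldStatus:

package example

import "entgo.io/ent/dialect/sql"

// onlyPending narrows any mutation exposing WhereP (such as
// DeviceTokenMutation) to rows whose status is "pending".
func onlyPending(m interface{ WhereP(...func(*sql.Selector)) }) {
	m.WhereP(func(s *sql.Selector) {
		s.Where(sql.EQ(s.C("status"), "pending")) // s.C qualifies the column
	})
}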
+func (m *DeviceTokenMutation) SetField(name string, value ent.Value) error { + switch name { + case devicetoken.FieldDeviceCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeviceCode(v) + return nil + case devicetoken.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case devicetoken.FieldToken: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case devicetoken.FieldExpiry: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiry(v) + return nil + case devicetoken.FieldLastRequest: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastRequest(v) + return nil + case devicetoken.FieldPollInterval: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPollInterval(v) + return nil + case devicetoken.FieldCodeChallenge: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallenge(v) + return nil + case devicetoken.FieldCodeChallengeMethod: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCodeChallengeMethod(v) + return nil + } + return fmt.Errorf("unknown DeviceToken field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DeviceTokenMutation) AddedFields() []string { + var fields []string + if m.addpoll_interval != nil { + fields = append(fields, devicetoken.FieldPollInterval) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DeviceTokenMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case devicetoken.FieldPollInterval: + return m.AddedPollInterval() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DeviceTokenMutation) AddField(name string, value ent.Value) error { + switch name { + case devicetoken.FieldPollInterval: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPollInterval(v) + return nil + } + return fmt.Errorf("unknown DeviceToken numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DeviceTokenMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(devicetoken.FieldToken) { + fields = append(fields, devicetoken.FieldToken) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DeviceTokenMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
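Only fields recorded in clearedFields are written back as NULL at flush time; for DeviceToken that is just the token field, per ClearedFields above. A small hedged sketch of clearing through the generic interface:

package example

import (
	"fmt"

	"entgo.io/ent"
)

// scrubToken clears the nullable "token" field; ClearField records the name
// in clearedFields so the flush writes NULL instead of skipping the column.
func scrubToken(m ent.Mutation) error {
	if m.Type() != "DeviceToken" {
		return fmt.Errorf("unexpected mutation type %s", m.Type())
	}
	return m.ClearField("token")
}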
+func (m *DeviceTokenMutation) ClearField(name string) error { + switch name { + case devicetoken.FieldToken: + m.ClearToken() + return nil + } + return fmt.Errorf("unknown DeviceToken nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DeviceTokenMutation) ResetField(name string) error { + switch name { + case devicetoken.FieldDeviceCode: + m.ResetDeviceCode() + return nil + case devicetoken.FieldStatus: + m.ResetStatus() + return nil + case devicetoken.FieldToken: + m.ResetToken() + return nil + case devicetoken.FieldExpiry: + m.ResetExpiry() + return nil + case devicetoken.FieldLastRequest: + m.ResetLastRequest() + return nil + case devicetoken.FieldPollInterval: + m.ResetPollInterval() + return nil + case devicetoken.FieldCodeChallenge: + m.ResetCodeChallenge() + return nil + case devicetoken.FieldCodeChallengeMethod: + m.ResetCodeChallengeMethod() + return nil + } + return fmt.Errorf("unknown DeviceToken field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DeviceTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DeviceTokenMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DeviceTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DeviceTokenMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DeviceTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DeviceTokenMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DeviceTokenMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown DeviceToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DeviceTokenMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown DeviceToken edge %s", name) +} + +// KeysMutation represents an operation that mutates the Keys nodes in the graph. +type KeysMutation struct { + config + op Op + typ string + id *string + verification_keys *[]storage.VerificationKey + appendverification_keys []storage.VerificationKey + signing_key *jose.JSONWebKey + signing_key_pub *jose.JSONWebKey + next_rotation *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Keys, error) + predicates []predicate.Keys +} + +var _ ent.Mutation = (*KeysMutation)(nil) + +// keysOption allows management of the mutation configuration using functional options. +type keysOption func(*KeysMutation) + +// newKeysMutation creates new mutation for the Keys entity. 
+func newKeysMutation(c config, op Op, opts ...keysOption) *KeysMutation {
+ m := &KeysMutation{
+ config: c,
+ op: op,
+ typ: TypeKeys,
+ clearedFields: make(map[string]struct{}),
+ }
+ for _, opt := range opts {
+ opt(m)
+ }
+ return m
+}
+
+// withKeysID sets the ID field of the mutation.
+func withKeysID(id string) keysOption {
+ return func(m *KeysMutation) {
+ var (
+ err error
+ once sync.Once
+ value *Keys
+ )
+ m.oldValue = func(ctx context.Context) (*Keys, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().Keys.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withKeys sets the old Keys of the mutation.
+func withKeys(node *Keys) keysOption {
+ return func(m *KeysMutation) {
+ m.oldValue = func(context.Context) (*Keys, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m KeysMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m KeysMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("db: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Keys entities.
+func (m *KeysMutation) SetID(id string) {
+ m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *KeysMutation) ID() (id string, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *KeysMutation) IDs(ctx context.Context) ([]string, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []string{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().Keys.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetVerificationKeys sets the "verification_keys" field.
+func (m *KeysMutation) SetVerificationKeys(sk []storage.VerificationKey) {
+ m.verification_keys = &sk
+ m.appendverification_keys = nil
+}
+
+// VerificationKeys returns the value of the "verification_keys" field in the mutation.
+func (m *KeysMutation) VerificationKeys() (r []storage.VerificationKey, exists bool) {
+ v := m.verification_keys
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldVerificationKeys returns the old "verification_keys" field's value of the Keys entity.
+// If the Keys object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *KeysMutation) OldVerificationKeys(ctx context.Context) (v []storage.VerificationKey, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldVerificationKeys is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldVerificationKeys requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldVerificationKeys: %w", err) + } + return oldValue.VerificationKeys, nil +} + +// AppendVerificationKeys adds sk to the "verification_keys" field. +func (m *KeysMutation) AppendVerificationKeys(sk []storage.VerificationKey) { + m.appendverification_keys = append(m.appendverification_keys, sk...) +} + +// AppendedVerificationKeys returns the list of values that were appended to the "verification_keys" field in this mutation. +func (m *KeysMutation) AppendedVerificationKeys() ([]storage.VerificationKey, bool) { + if len(m.appendverification_keys) == 0 { + return nil, false + } + return m.appendverification_keys, true +} + +// ResetVerificationKeys resets all changes to the "verification_keys" field. +func (m *KeysMutation) ResetVerificationKeys() { + m.verification_keys = nil + m.appendverification_keys = nil +} + +// SetSigningKey sets the "signing_key" field. +func (m *KeysMutation) SetSigningKey(jwk jose.JSONWebKey) { + m.signing_key = &jwk +} + +// SigningKey returns the value of the "signing_key" field in the mutation. +func (m *KeysMutation) SigningKey() (r jose.JSONWebKey, exists bool) { + v := m.signing_key + if v == nil { + return + } + return *v, true +} + +// OldSigningKey returns the old "signing_key" field's value of the Keys entity. +// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *KeysMutation) OldSigningKey(ctx context.Context) (v jose.JSONWebKey, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSigningKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSigningKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSigningKey: %w", err) + } + return oldValue.SigningKey, nil +} + +// ResetSigningKey resets all changes to the "signing_key" field. +func (m *KeysMutation) ResetSigningKey() { + m.signing_key = nil +} + +// SetSigningKeyPub sets the "signing_key_pub" field. +func (m *KeysMutation) SetSigningKeyPub(jwk jose.JSONWebKey) { + m.signing_key_pub = &jwk +} + +// SigningKeyPub returns the value of the "signing_key_pub" field in the mutation. +func (m *KeysMutation) SigningKeyPub() (r jose.JSONWebKey, exists bool) { + v := m.signing_key_pub + if v == nil { + return + } + return *v, true +} + +// OldSigningKeyPub returns the old "signing_key_pub" field's value of the Keys entity. +// If the Keys object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
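SetVerificationKeys and AppendVerificationKeys above are deliberately exclusive: Set replaces the whole JSON column and drops any pending appends, while Append only accumulates a suffix for the dialect to apply. A hedged fragment (storage here is dex's github.com/dexidp/dex/storage package, as already imported by this file):

package example

import "github.com/dexidp/dex/storage"

// rotateIn queues one extra verification key without rewriting the column;
// AppendedVerificationKeys then reports the pending suffix.
func rotateIn(m interface {
	AppendVerificationKeys([]storage.VerificationKey)
}, k storage.VerificationKey) {
	m.AppendVerificationKeys([]storage.VerificationKey{k})
}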
+func (m *KeysMutation) OldSigningKeyPub(ctx context.Context) (v jose.JSONWebKey, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldSigningKeyPub is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldSigningKeyPub requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldSigningKeyPub: %w", err)
+ }
+ return oldValue.SigningKeyPub, nil
+}
+
+// ResetSigningKeyPub resets all changes to the "signing_key_pub" field.
+func (m *KeysMutation) ResetSigningKeyPub() {
+ m.signing_key_pub = nil
+}
+
+// SetNextRotation sets the "next_rotation" field.
+func (m *KeysMutation) SetNextRotation(t time.Time) {
+ m.next_rotation = &t
+}
+
+// NextRotation returns the value of the "next_rotation" field in the mutation.
+func (m *KeysMutation) NextRotation() (r time.Time, exists bool) {
+ v := m.next_rotation
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldNextRotation returns the old "next_rotation" field's value of the Keys entity.
+// If the Keys object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *KeysMutation) OldNextRotation(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldNextRotation is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldNextRotation requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldNextRotation: %w", err)
+ }
+ return oldValue.NextRotation, nil
+}
+
+// ResetNextRotation resets all changes to the "next_rotation" field.
+func (m *KeysMutation) ResetNextRotation() {
+ m.next_rotation = nil
+}
+
+// Where appends a list of predicates to the KeysMutation builder.
+func (m *KeysMutation) Where(ps ...predicate.Keys) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the KeysMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *KeysMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Keys, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *KeysMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *KeysMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (Keys).
+func (m *KeysMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *KeysMutation) Fields() []string {
+ fields := make([]string, 0, 4)
+ if m.verification_keys != nil {
+ fields = append(fields, keys.FieldVerificationKeys)
+ }
+ if m.signing_key != nil {
+ fields = append(fields, keys.FieldSigningKey)
+ }
+ if m.signing_key_pub != nil {
+ fields = append(fields, keys.FieldSigningKeyPub)
+ }
+ if m.next_rotation != nil {
+ fields = append(fields, keys.FieldNextRotation)
+ }
+ return fields
+}
+
+// Field returns the value of a field with the given name.
The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *KeysMutation) Field(name string) (ent.Value, bool) { + switch name { + case keys.FieldVerificationKeys: + return m.VerificationKeys() + case keys.FieldSigningKey: + return m.SigningKey() + case keys.FieldSigningKeyPub: + return m.SigningKeyPub() + case keys.FieldNextRotation: + return m.NextRotation() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *KeysMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case keys.FieldVerificationKeys: + return m.OldVerificationKeys(ctx) + case keys.FieldSigningKey: + return m.OldSigningKey(ctx) + case keys.FieldSigningKeyPub: + return m.OldSigningKeyPub(ctx) + case keys.FieldNextRotation: + return m.OldNextRotation(ctx) + } + return nil, fmt.Errorf("unknown Keys field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *KeysMutation) SetField(name string, value ent.Value) error { + switch name { + case keys.FieldVerificationKeys: + v, ok := value.([]storage.VerificationKey) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVerificationKeys(v) + return nil + case keys.FieldSigningKey: + v, ok := value.(jose.JSONWebKey) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSigningKey(v) + return nil + case keys.FieldSigningKeyPub: + v, ok := value.(jose.JSONWebKey) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSigningKeyPub(v) + return nil + case keys.FieldNextRotation: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNextRotation(v) + return nil + } + return fmt.Errorf("unknown Keys field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *KeysMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *KeysMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *KeysMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Keys numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *KeysMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *KeysMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
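OldField is restricted to UpdateOne because that is the only case with a single, well-defined pre-image row. A hedged sketch of the audit-style hook this dispatch enables (names illustrative, not from this patch):

package example

import (
	"context"
	"log"

	"entgo.io/ent"
)

// auditSigningKey logs when an UpdateOne touches "signing_key", reading the
// previous value through the OldField dispatch shown above.
func auditSigningKey(ctx context.Context, m ent.Mutation) error {
	if !m.Op().Is(ent.OpUpdateOne) {
		return nil // OldField would return an error on any other operation
	}
	if _, changed := m.Field("signing_key"); !changed {
		return nil
	}
	old, err := m.OldField(ctx, "signing_key")
	if err != nil {
		return err
	}
	log.Printf("signing key rotated; previous value type: %T", old)
	return nil
}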
+func (m *KeysMutation) ClearField(name string) error { + return fmt.Errorf("unknown Keys nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *KeysMutation) ResetField(name string) error { + switch name { + case keys.FieldVerificationKeys: + m.ResetVerificationKeys() + return nil + case keys.FieldSigningKey: + m.ResetSigningKey() + return nil + case keys.FieldSigningKeyPub: + m.ResetSigningKeyPub() + return nil + case keys.FieldNextRotation: + m.ResetNextRotation() + return nil + } + return fmt.Errorf("unknown Keys field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *KeysMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *KeysMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *KeysMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *KeysMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *KeysMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *KeysMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *KeysMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Keys unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *KeysMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Keys edge %s", name) +} + +// OAuth2ClientMutation represents an operation that mutates the OAuth2Client nodes in the graph. +type OAuth2ClientMutation struct { + config + op Op + typ string + id *string + secret *string + redirect_uris *[]string + appendredirect_uris []string + trusted_peers *[]string + appendtrusted_peers []string + public *bool + name *string + logo_url *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*OAuth2Client, error) + predicates []predicate.OAuth2Client +} + +var _ ent.Mutation = (*OAuth2ClientMutation)(nil) + +// oauth2clientOption allows management of the mutation configuration using functional options. +type oauth2clientOption func(*OAuth2ClientMutation) + +// newOAuth2ClientMutation creates new mutation for the OAuth2Client entity. +func newOAuth2ClientMutation(c config, op Op, opts ...oauth2clientOption) *OAuth2ClientMutation { + m := &OAuth2ClientMutation{ + config: c, + op: op, + typ: TypeOAuth2Client, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withOAuth2ClientID sets the ID field of the mutation. 
+func withOAuth2ClientID(id string) oauth2clientOption {
+ return func(m *OAuth2ClientMutation) {
+ var (
+ err error
+ once sync.Once
+ value *OAuth2Client
+ )
+ m.oldValue = func(ctx context.Context) (*OAuth2Client, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().OAuth2Client.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withOAuth2Client sets the old OAuth2Client of the mutation.
+func withOAuth2Client(node *OAuth2Client) oauth2clientOption {
+ return func(m *OAuth2ClientMutation) {
+ m.oldValue = func(context.Context) (*OAuth2Client, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m OAuth2ClientMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m OAuth2ClientMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("db: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of OAuth2Client entities.
+func (m *OAuth2ClientMutation) SetID(id string) {
+ m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *OAuth2ClientMutation) ID() (id string, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *OAuth2ClientMutation) IDs(ctx context.Context) ([]string, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []string{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().OAuth2Client.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetSecret sets the "secret" field.
+func (m *OAuth2ClientMutation) SetSecret(s string) {
+ m.secret = &s
+}
+
+// Secret returns the value of the "secret" field in the mutation.
+func (m *OAuth2ClientMutation) Secret() (r string, exists bool) {
+ v := m.secret
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldSecret returns the old "secret" field's value of the OAuth2Client entity.
+// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OAuth2ClientMutation) OldSecret(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSecret is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSecret requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSecret: %w", err) + } + return oldValue.Secret, nil +} + +// ResetSecret resets all changes to the "secret" field. +func (m *OAuth2ClientMutation) ResetSecret() { + m.secret = nil +} + +// SetRedirectUris sets the "redirect_uris" field. +func (m *OAuth2ClientMutation) SetRedirectUris(s []string) { + m.redirect_uris = &s + m.appendredirect_uris = nil +} + +// RedirectUris returns the value of the "redirect_uris" field in the mutation. +func (m *OAuth2ClientMutation) RedirectUris() (r []string, exists bool) { + v := m.redirect_uris + if v == nil { + return + } + return *v, true +} + +// OldRedirectUris returns the old "redirect_uris" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OAuth2ClientMutation) OldRedirectUris(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRedirectUris is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRedirectUris requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRedirectUris: %w", err) + } + return oldValue.RedirectUris, nil +} + +// AppendRedirectUris adds s to the "redirect_uris" field. +func (m *OAuth2ClientMutation) AppendRedirectUris(s []string) { + m.appendredirect_uris = append(m.appendredirect_uris, s...) +} + +// AppendedRedirectUris returns the list of values that were appended to the "redirect_uris" field in this mutation. +func (m *OAuth2ClientMutation) AppendedRedirectUris() ([]string, bool) { + if len(m.appendredirect_uris) == 0 { + return nil, false + } + return m.appendredirect_uris, true +} + +// ClearRedirectUris clears the value of the "redirect_uris" field. +func (m *OAuth2ClientMutation) ClearRedirectUris() { + m.redirect_uris = nil + m.appendredirect_uris = nil + m.clearedFields[oauth2client.FieldRedirectUris] = struct{}{} +} + +// RedirectUrisCleared returns if the "redirect_uris" field was cleared in this mutation. +func (m *OAuth2ClientMutation) RedirectUrisCleared() bool { + _, ok := m.clearedFields[oauth2client.FieldRedirectUris] + return ok +} + +// ResetRedirectUris resets all changes to the "redirect_uris" field. +func (m *OAuth2ClientMutation) ResetRedirectUris() { + m.redirect_uris = nil + m.appendredirect_uris = nil + delete(m.clearedFields, oauth2client.FieldRedirectUris) +} + +// SetTrustedPeers sets the "trusted_peers" field. +func (m *OAuth2ClientMutation) SetTrustedPeers(s []string) { + m.trusted_peers = &s + m.appendtrusted_peers = nil +} + +// TrustedPeers returns the value of the "trusted_peers" field in the mutation. +func (m *OAuth2ClientMutation) TrustedPeers() (r []string, exists bool) { + v := m.trusted_peers + if v == nil { + return + } + return *v, true +} + +// OldTrustedPeers returns the old "trusted_peers" field's value of the OAuth2Client entity. 
+// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OAuth2ClientMutation) OldTrustedPeers(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTrustedPeers is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTrustedPeers requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTrustedPeers: %w", err) + } + return oldValue.TrustedPeers, nil +} + +// AppendTrustedPeers adds s to the "trusted_peers" field. +func (m *OAuth2ClientMutation) AppendTrustedPeers(s []string) { + m.appendtrusted_peers = append(m.appendtrusted_peers, s...) +} + +// AppendedTrustedPeers returns the list of values that were appended to the "trusted_peers" field in this mutation. +func (m *OAuth2ClientMutation) AppendedTrustedPeers() ([]string, bool) { + if len(m.appendtrusted_peers) == 0 { + return nil, false + } + return m.appendtrusted_peers, true +} + +// ClearTrustedPeers clears the value of the "trusted_peers" field. +func (m *OAuth2ClientMutation) ClearTrustedPeers() { + m.trusted_peers = nil + m.appendtrusted_peers = nil + m.clearedFields[oauth2client.FieldTrustedPeers] = struct{}{} +} + +// TrustedPeersCleared returns if the "trusted_peers" field was cleared in this mutation. +func (m *OAuth2ClientMutation) TrustedPeersCleared() bool { + _, ok := m.clearedFields[oauth2client.FieldTrustedPeers] + return ok +} + +// ResetTrustedPeers resets all changes to the "trusted_peers" field. +func (m *OAuth2ClientMutation) ResetTrustedPeers() { + m.trusted_peers = nil + m.appendtrusted_peers = nil + delete(m.clearedFields, oauth2client.FieldTrustedPeers) +} + +// SetPublic sets the "public" field. +func (m *OAuth2ClientMutation) SetPublic(b bool) { + m.public = &b +} + +// Public returns the value of the "public" field in the mutation. +func (m *OAuth2ClientMutation) Public() (r bool, exists bool) { + v := m.public + if v == nil { + return + } + return *v, true +} + +// OldPublic returns the old "public" field's value of the OAuth2Client entity. +// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *OAuth2ClientMutation) OldPublic(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPublic is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPublic requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPublic: %w", err) + } + return oldValue.Public, nil +} + +// ResetPublic resets all changes to the "public" field. +func (m *OAuth2ClientMutation) ResetPublic() { + m.public = nil +} + +// SetName sets the "name" field. +func (m *OAuth2ClientMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *OAuth2ClientMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the OAuth2Client entity. 
+// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OAuth2ClientMutation) OldName(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldName is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldName requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldName: %w", err)
+ }
+ return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *OAuth2ClientMutation) ResetName() {
+ m.name = nil
+}
+
+// SetLogoURL sets the "logo_url" field.
+func (m *OAuth2ClientMutation) SetLogoURL(s string) {
+ m.logo_url = &s
+}
+
+// LogoURL returns the value of the "logo_url" field in the mutation.
+func (m *OAuth2ClientMutation) LogoURL() (r string, exists bool) {
+ v := m.logo_url
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldLogoURL returns the old "logo_url" field's value of the OAuth2Client entity.
+// If the OAuth2Client object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OAuth2ClientMutation) OldLogoURL(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldLogoURL is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldLogoURL requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldLogoURL: %w", err)
+ }
+ return oldValue.LogoURL, nil
+}
+
+// ResetLogoURL resets all changes to the "logo_url" field.
+func (m *OAuth2ClientMutation) ResetLogoURL() {
+ m.logo_url = nil
+}
+
+// Where appends a list of predicates to the OAuth2ClientMutation builder.
+func (m *OAuth2ClientMutation) Where(ps ...predicate.OAuth2Client) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the OAuth2ClientMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *OAuth2ClientMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.OAuth2Client, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *OAuth2ClientMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *OAuth2ClientMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (OAuth2Client).
+func (m *OAuth2ClientMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *OAuth2ClientMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.secret != nil { + fields = append(fields, oauth2client.FieldSecret) + } + if m.redirect_uris != nil { + fields = append(fields, oauth2client.FieldRedirectUris) + } + if m.trusted_peers != nil { + fields = append(fields, oauth2client.FieldTrustedPeers) + } + if m.public != nil { + fields = append(fields, oauth2client.FieldPublic) + } + if m.name != nil { + fields = append(fields, oauth2client.FieldName) + } + if m.logo_url != nil { + fields = append(fields, oauth2client.FieldLogoURL) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *OAuth2ClientMutation) Field(name string) (ent.Value, bool) { + switch name { + case oauth2client.FieldSecret: + return m.Secret() + case oauth2client.FieldRedirectUris: + return m.RedirectUris() + case oauth2client.FieldTrustedPeers: + return m.TrustedPeers() + case oauth2client.FieldPublic: + return m.Public() + case oauth2client.FieldName: + return m.Name() + case oauth2client.FieldLogoURL: + return m.LogoURL() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *OAuth2ClientMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case oauth2client.FieldSecret: + return m.OldSecret(ctx) + case oauth2client.FieldRedirectUris: + return m.OldRedirectUris(ctx) + case oauth2client.FieldTrustedPeers: + return m.OldTrustedPeers(ctx) + case oauth2client.FieldPublic: + return m.OldPublic(ctx) + case oauth2client.FieldName: + return m.OldName(ctx) + case oauth2client.FieldLogoURL: + return m.OldLogoURL(ctx) + } + return nil, fmt.Errorf("unknown OAuth2Client field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *OAuth2ClientMutation) SetField(name string, value ent.Value) error { + switch name { + case oauth2client.FieldSecret: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSecret(v) + return nil + case oauth2client.FieldRedirectUris: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedirectUris(v) + return nil + case oauth2client.FieldTrustedPeers: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTrustedPeers(v) + return nil + case oauth2client.FieldPublic: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublic(v) + return nil + case oauth2client.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case oauth2client.FieldLogoURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLogoURL(v) + return nil + } + return fmt.Errorf("unknown OAuth2Client field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *OAuth2ClientMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *OAuth2ClientMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *OAuth2ClientMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown OAuth2Client numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *OAuth2ClientMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(oauth2client.FieldRedirectUris) { + fields = append(fields, oauth2client.FieldRedirectUris) + } + if m.FieldCleared(oauth2client.FieldTrustedPeers) { + fields = append(fields, oauth2client.FieldTrustedPeers) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *OAuth2ClientMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *OAuth2ClientMutation) ClearField(name string) error { + switch name { + case oauth2client.FieldRedirectUris: + m.ClearRedirectUris() + return nil + case oauth2client.FieldTrustedPeers: + m.ClearTrustedPeers() + return nil + } + return fmt.Errorf("unknown OAuth2Client nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *OAuth2ClientMutation) ResetField(name string) error { + switch name { + case oauth2client.FieldSecret: + m.ResetSecret() + return nil + case oauth2client.FieldRedirectUris: + m.ResetRedirectUris() + return nil + case oauth2client.FieldTrustedPeers: + m.ResetTrustedPeers() + return nil + case oauth2client.FieldPublic: + m.ResetPublic() + return nil + case oauth2client.FieldName: + m.ResetName() + return nil + case oauth2client.FieldLogoURL: + m.ResetLogoURL() + return nil + } + return fmt.Errorf("unknown OAuth2Client field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *OAuth2ClientMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *OAuth2ClientMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *OAuth2ClientMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *OAuth2ClientMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *OAuth2ClientMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *OAuth2ClientMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *OAuth2ClientMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown OAuth2Client unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *OAuth2ClientMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown OAuth2Client edge %s", name)
+}
+
+// OfflineSessionMutation represents an operation that mutates the OfflineSession nodes in the graph.
+type OfflineSessionMutation struct {
+	config
+	op             Op
+	typ            string
+	id             *string
+	user_id        *string
+	conn_id        *string
+	refresh        *[]byte
+	connector_data *[]byte
+	clearedFields  map[string]struct{}
+	done           bool
+	oldValue       func(context.Context) (*OfflineSession, error)
+	predicates     []predicate.OfflineSession
+}
+
+var _ ent.Mutation = (*OfflineSessionMutation)(nil)
+
+// offlinesessionOption allows management of the mutation configuration using functional options.
+type offlinesessionOption func(*OfflineSessionMutation)
+
+// newOfflineSessionMutation creates a new mutation for the OfflineSession entity.
+func newOfflineSessionMutation(c config, op Op, opts ...offlinesessionOption) *OfflineSessionMutation {
+	m := &OfflineSessionMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeOfflineSession,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withOfflineSessionID sets the ID field of the mutation.
+func withOfflineSessionID(id string) offlinesessionOption {
+	return func(m *OfflineSessionMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *OfflineSession
+		)
+		m.oldValue = func(ctx context.Context) (*OfflineSession, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().OfflineSession.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withOfflineSession sets the old OfflineSession of the mutation.
+func withOfflineSession(node *OfflineSession) offlinesessionOption {
+	return func(m *OfflineSessionMutation) {
+		m.oldValue = func(context.Context) (*OfflineSession, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m OfflineSessionMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m OfflineSessionMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of OfflineSession entities.
+func (m *OfflineSessionMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *OfflineSessionMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *OfflineSessionMutation) IDs(ctx context.Context) ([]string, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []string{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().OfflineSession.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetUserID sets the "user_id" field.
+func (m *OfflineSessionMutation) SetUserID(s string) {
+	m.user_id = &s
+}
+
+// UserID returns the value of the "user_id" field in the mutation.
+func (m *OfflineSessionMutation) UserID() (r string, exists bool) {
+	v := m.user_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUserID returns the old "user_id" field's value of the OfflineSession entity.
+// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OfflineSessionMutation) OldUserID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUserID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUserID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUserID: %w", err)
+	}
+	return oldValue.UserID, nil
+}
+
+// ResetUserID resets all changes to the "user_id" field.
+func (m *OfflineSessionMutation) ResetUserID() {
+	m.user_id = nil
+}
+
+// SetConnID sets the "conn_id" field.
+func (m *OfflineSessionMutation) SetConnID(s string) {
+	m.conn_id = &s
+}
+
+// ConnID returns the value of the "conn_id" field in the mutation.
+func (m *OfflineSessionMutation) ConnID() (r string, exists bool) {
+	v := m.conn_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldConnID returns the old "conn_id" field's value of the OfflineSession entity.
+// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OfflineSessionMutation) OldConnID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldConnID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldConnID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldConnID: %w", err)
+	}
+	return oldValue.ConnID, nil
+}
+
+// ResetConnID resets all changes to the "conn_id" field.
+func (m *OfflineSessionMutation) ResetConnID() {
+	m.conn_id = nil
+}
+
+// SetRefresh sets the "refresh" field.
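+// The mutation keeps a reference to the slice's backing array, so callers
+// should not modify the contents after passing it in (editor's note):
+//
+//	m.SetRefresh([]byte(`{"refresh_token":"..."}`))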
+func (m *OfflineSessionMutation) SetRefresh(b []byte) {
+	m.refresh = &b
+}
+
+// Refresh returns the value of the "refresh" field in the mutation.
+func (m *OfflineSessionMutation) Refresh() (r []byte, exists bool) {
+	v := m.refresh
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldRefresh returns the old "refresh" field's value of the OfflineSession entity.
+// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OfflineSessionMutation) OldRefresh(ctx context.Context) (v []byte, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldRefresh is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldRefresh requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldRefresh: %w", err)
+	}
+	return oldValue.Refresh, nil
+}
+
+// ResetRefresh resets all changes to the "refresh" field.
+func (m *OfflineSessionMutation) ResetRefresh() {
+	m.refresh = nil
+}
+
+// SetConnectorData sets the "connector_data" field.
+func (m *OfflineSessionMutation) SetConnectorData(b []byte) {
+	m.connector_data = &b
+}
+
+// ConnectorData returns the value of the "connector_data" field in the mutation.
+func (m *OfflineSessionMutation) ConnectorData() (r []byte, exists bool) {
+	v := m.connector_data
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldConnectorData returns the old "connector_data" field's value of the OfflineSession entity.
+// If the OfflineSession object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *OfflineSessionMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldConnectorData is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldConnectorData requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldConnectorData: %w", err)
+	}
+	return oldValue.ConnectorData, nil
+}
+
+// ClearConnectorData clears the value of the "connector_data" field.
+func (m *OfflineSessionMutation) ClearConnectorData() {
+	m.connector_data = nil
+	m.clearedFields[offlinesession.FieldConnectorData] = struct{}{}
+}
+
+// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation.
+func (m *OfflineSessionMutation) ConnectorDataCleared() bool {
+	_, ok := m.clearedFields[offlinesession.FieldConnectorData]
+	return ok
+}
+
+// ResetConnectorData resets all changes to the "connector_data" field.
+func (m *OfflineSessionMutation) ResetConnectorData() {
+	m.connector_data = nil
+	delete(m.clearedFields, offlinesession.FieldConnectorData)
+}
+
+// Where appends a list of predicates to the OfflineSessionMutation builder.
+func (m *OfflineSessionMutation) Where(ps ...predicate.OfflineSession) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the OfflineSessionMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
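+//
+// Illustrative usage (editor's sketch; sql is entgo.io/ent/dialect/sql, which
+// this file already imports):
+//
+//	m.WhereP(func(s *sql.Selector) {
+//		s.Where(sql.EQ(s.C(offlinesession.FieldUserID), "user-1"))
+//	})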
+func (m *OfflineSessionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.OfflineSession, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *OfflineSessionMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *OfflineSessionMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (OfflineSession). +func (m *OfflineSessionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *OfflineSessionMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.user_id != nil { + fields = append(fields, offlinesession.FieldUserID) + } + if m.conn_id != nil { + fields = append(fields, offlinesession.FieldConnID) + } + if m.refresh != nil { + fields = append(fields, offlinesession.FieldRefresh) + } + if m.connector_data != nil { + fields = append(fields, offlinesession.FieldConnectorData) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *OfflineSessionMutation) Field(name string) (ent.Value, bool) { + switch name { + case offlinesession.FieldUserID: + return m.UserID() + case offlinesession.FieldConnID: + return m.ConnID() + case offlinesession.FieldRefresh: + return m.Refresh() + case offlinesession.FieldConnectorData: + return m.ConnectorData() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *OfflineSessionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case offlinesession.FieldUserID: + return m.OldUserID(ctx) + case offlinesession.FieldConnID: + return m.OldConnID(ctx) + case offlinesession.FieldRefresh: + return m.OldRefresh(ctx) + case offlinesession.FieldConnectorData: + return m.OldConnectorData(ctx) + } + return nil, fmt.Errorf("unknown OfflineSession field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *OfflineSessionMutation) SetField(name string, value ent.Value) error { + switch name { + case offlinesession.FieldUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case offlinesession.FieldConnID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnID(v) + return nil + case offlinesession.FieldRefresh: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRefresh(v) + return nil + case offlinesession.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + } + return fmt.Errorf("unknown OfflineSession field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *OfflineSessionMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *OfflineSessionMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *OfflineSessionMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown OfflineSession numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *OfflineSessionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(offlinesession.FieldConnectorData) { + fields = append(fields, offlinesession.FieldConnectorData) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *OfflineSessionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *OfflineSessionMutation) ClearField(name string) error { + switch name { + case offlinesession.FieldConnectorData: + m.ClearConnectorData() + return nil + } + return fmt.Errorf("unknown OfflineSession nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *OfflineSessionMutation) ResetField(name string) error { + switch name { + case offlinesession.FieldUserID: + m.ResetUserID() + return nil + case offlinesession.FieldConnID: + m.ResetConnID() + return nil + case offlinesession.FieldRefresh: + m.ResetRefresh() + return nil + case offlinesession.FieldConnectorData: + m.ResetConnectorData() + return nil + } + return fmt.Errorf("unknown OfflineSession field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *OfflineSessionMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *OfflineSessionMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *OfflineSessionMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *OfflineSessionMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *OfflineSessionMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *OfflineSessionMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *OfflineSessionMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown OfflineSession unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *OfflineSessionMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown OfflineSession edge %s", name)
+}
+
+// PasswordMutation represents an operation that mutates the Password nodes in the graph.
+type PasswordMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	email         *string
+	hash          *[]byte
+	username      *string
+	user_id       *string
+	clearedFields map[string]struct{}
+	done          bool
+	oldValue      func(context.Context) (*Password, error)
+	predicates    []predicate.Password
+}
+
+var _ ent.Mutation = (*PasswordMutation)(nil)
+
+// passwordOption allows management of the mutation configuration using functional options.
+type passwordOption func(*PasswordMutation)
+
+// newPasswordMutation creates a new mutation for the Password entity.
+func newPasswordMutation(c config, op Op, opts ...passwordOption) *PasswordMutation {
+	m := &PasswordMutation{
+		config:        c,
+		op:            op,
+		typ:           TypePassword,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withPasswordID sets the ID field of the mutation.
+func withPasswordID(id int) passwordOption {
+	return func(m *PasswordMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Password
+		)
+		m.oldValue = func(ctx context.Context) (*Password, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Password.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withPassword sets the old Password of the mutation.
+func withPassword(node *Password) passwordOption {
+	return func(m *PasswordMutation) {
+		m.oldValue = func(context.Context) (*Password, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m PasswordMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m PasswordMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *PasswordMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
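+//
+// For example (editor's sketch; "client" is an assumed *Client, and
+// "password" is the generated predicate package):
+//
+//	m := client.Password.Update().Where(password.Email("user@example.com")).Mutation()
+//	ids, err := m.IDs(ctx) // the ids of the rows the update would touch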
+func (m *PasswordMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Password.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetEmail sets the "email" field. +func (m *PasswordMutation) SetEmail(s string) { + m.email = &s +} + +// Email returns the value of the "email" field in the mutation. +func (m *PasswordMutation) Email() (r string, exists bool) { + v := m.email + if v == nil { + return + } + return *v, true +} + +// OldEmail returns the old "email" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmail: %w", err) + } + return oldValue.Email, nil +} + +// ResetEmail resets all changes to the "email" field. +func (m *PasswordMutation) ResetEmail() { + m.email = nil +} + +// SetHash sets the "hash" field. +func (m *PasswordMutation) SetHash(b []byte) { + m.hash = &b +} + +// Hash returns the value of the "hash" field in the mutation. +func (m *PasswordMutation) Hash() (r []byte, exists bool) { + v := m.hash + if v == nil { + return + } + return *v, true +} + +// OldHash returns the old "hash" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PasswordMutation) OldHash(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHash: %w", err) + } + return oldValue.Hash, nil +} + +// ResetHash resets all changes to the "hash" field. +func (m *PasswordMutation) ResetHash() { + m.hash = nil +} + +// SetUsername sets the "username" field. +func (m *PasswordMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *PasswordMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the Password entity. +// If the Password object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
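+//
+// For example, inside an UpdateOne hook (editor's sketch):
+//
+//	old, err := m.OldUsername(ctx) // the first Old* call issues one query; later calls reuse it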
+func (m *PasswordMutation) OldUsername(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUsername is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUsername requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUsername: %w", err)
+	}
+	return oldValue.Username, nil
+}
+
+// ResetUsername resets all changes to the "username" field.
+func (m *PasswordMutation) ResetUsername() {
+	m.username = nil
+}
+
+// SetUserID sets the "user_id" field.
+func (m *PasswordMutation) SetUserID(s string) {
+	m.user_id = &s
+}
+
+// UserID returns the value of the "user_id" field in the mutation.
+func (m *PasswordMutation) UserID() (r string, exists bool) {
+	v := m.user_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUserID returns the old "user_id" field's value of the Password entity.
+// If the Password object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PasswordMutation) OldUserID(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUserID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUserID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUserID: %w", err)
+	}
+	return oldValue.UserID, nil
+}
+
+// ResetUserID resets all changes to the "user_id" field.
+func (m *PasswordMutation) ResetUserID() {
+	m.user_id = nil
+}
+
+// Where appends a list of predicates to the PasswordMutation builder.
+func (m *PasswordMutation) Where(ps ...predicate.Password) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the PasswordMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *PasswordMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Password, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *PasswordMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *PasswordMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Password).
+func (m *PasswordMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *PasswordMutation) Fields() []string {
+	fields := make([]string, 0, 4)
+	if m.email != nil {
+		fields = append(fields, password.FieldEmail)
+	}
+	if m.hash != nil {
+		fields = append(fields, password.FieldHash)
+	}
+	if m.username != nil {
+		fields = append(fields, password.FieldUsername)
+	}
+	if m.user_id != nil {
+		fields = append(fields, password.FieldUserID)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
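+//
+// For example (editor's sketch; ent.Value is an empty interface, so the
+// result must be type-asserted back to the schema type):
+//
+//	if v, ok := m.Field(password.FieldEmail); ok {
+//		email := v.(string)
+//		_ = email
+//	}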
+func (m *PasswordMutation) Field(name string) (ent.Value, bool) { + switch name { + case password.FieldEmail: + return m.Email() + case password.FieldHash: + return m.Hash() + case password.FieldUsername: + return m.Username() + case password.FieldUserID: + return m.UserID() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PasswordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case password.FieldEmail: + return m.OldEmail(ctx) + case password.FieldHash: + return m.OldHash(ctx) + case password.FieldUsername: + return m.OldUsername(ctx) + case password.FieldUserID: + return m.OldUserID(ctx) + } + return nil, fmt.Errorf("unknown Password field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PasswordMutation) SetField(name string, value ent.Value) error { + switch name { + case password.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case password.FieldHash: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHash(v) + return nil + case password.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case password.FieldUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + } + return fmt.Errorf("unknown Password field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PasswordMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PasswordMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PasswordMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Password numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PasswordMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PasswordMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PasswordMutation) ClearField(name string) error { + return fmt.Errorf("unknown Password nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *PasswordMutation) ResetField(name string) error {
+	switch name {
+	case password.FieldEmail:
+		m.ResetEmail()
+		return nil
+	case password.FieldHash:
+		m.ResetHash()
+		return nil
+	case password.FieldUsername:
+		m.ResetUsername()
+		return nil
+	case password.FieldUserID:
+		m.ResetUserID()
+		return nil
+	}
+	return fmt.Errorf("unknown Password field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *PasswordMutation) AddedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *PasswordMutation) AddedIDs(name string) []ent.Value {
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *PasswordMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *PasswordMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *PasswordMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 0)
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *PasswordMutation) EdgeCleared(name string) bool {
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *PasswordMutation) ClearEdge(name string) error {
+	return fmt.Errorf("unknown Password unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *PasswordMutation) ResetEdge(name string) error {
+	return fmt.Errorf("unknown Password edge %s", name)
+}
+
+// RefreshTokenMutation represents an operation that mutates the RefreshToken nodes in the graph.
+type RefreshTokenMutation struct {
+	config
+	op                        Op
+	typ                       string
+	id                        *string
+	client_id                 *string
+	scopes                    *[]string
+	appendscopes              []string
+	nonce                     *string
+	claims_user_id            *string
+	claims_username           *string
+	claims_email              *string
+	claims_email_verified     *bool
+	claims_groups             *[]string
+	appendclaims_groups       []string
+	claims_preferred_username *string
+	connector_id              *string
+	connector_data            *[]byte
+	token                     *string
+	obsolete_token            *string
+	created_at                *time.Time
+	last_used                 *time.Time
+	clearedFields             map[string]struct{}
+	done                      bool
+	oldValue                  func(context.Context) (*RefreshToken, error)
+	predicates                []predicate.RefreshToken
+}
+
+var _ ent.Mutation = (*RefreshTokenMutation)(nil)
+
+// refreshtokenOption allows management of the mutation configuration using functional options.
+type refreshtokenOption func(*RefreshTokenMutation)
+
+// newRefreshTokenMutation creates a new mutation for the RefreshToken entity.
+func newRefreshTokenMutation(c config, op Op, opts ...refreshtokenOption) *RefreshTokenMutation {
+	m := &RefreshTokenMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeRefreshToken,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withRefreshTokenID sets the ID field of the mutation.
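+// It also installs a lazy oldValue loader: the old entity is fetched from the
+// database at most once (guarded by sync.Once below) and only when an Old*
+// accessor is first called (editor's note).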
+func withRefreshTokenID(id string) refreshtokenOption {
+	return func(m *RefreshTokenMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *RefreshToken
+		)
+		m.oldValue = func(ctx context.Context) (*RefreshToken, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().RefreshToken.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withRefreshToken sets the old RefreshToken of the mutation.
+func withRefreshToken(node *RefreshToken) refreshtokenOption {
+	return func(m *RefreshTokenMutation) {
+		m.oldValue = func(context.Context) (*RefreshToken, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m RefreshTokenMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m RefreshTokenMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of RefreshToken entities.
+func (m *RefreshTokenMutation) SetID(id string) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *RefreshTokenMutation) ID() (id string, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *RefreshTokenMutation) IDs(ctx context.Context) ([]string, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []string{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().RefreshToken.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetClientID sets the "client_id" field.
+func (m *RefreshTokenMutation) SetClientID(s string) {
+	m.client_id = &s
+}
+
+// ClientID returns the value of the "client_id" field in the mutation.
+func (m *RefreshTokenMutation) ClientID() (r string, exists bool) {
+	v := m.client_id
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldClientID returns the old "client_id" field's value of the RefreshToken entity.
+// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *RefreshTokenMutation) OldClientID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClientID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClientID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClientID: %w", err) + } + return oldValue.ClientID, nil +} + +// ResetClientID resets all changes to the "client_id" field. +func (m *RefreshTokenMutation) ResetClientID() { + m.client_id = nil +} + +// SetScopes sets the "scopes" field. +func (m *RefreshTokenMutation) SetScopes(s []string) { + m.scopes = &s + m.appendscopes = nil +} + +// Scopes returns the value of the "scopes" field in the mutation. +func (m *RefreshTokenMutation) Scopes() (r []string, exists bool) { + v := m.scopes + if v == nil { + return + } + return *v, true +} + +// OldScopes returns the old "scopes" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldScopes(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScopes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScopes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScopes: %w", err) + } + return oldValue.Scopes, nil +} + +// AppendScopes adds s to the "scopes" field. +func (m *RefreshTokenMutation) AppendScopes(s []string) { + m.appendscopes = append(m.appendscopes, s...) +} + +// AppendedScopes returns the list of values that were appended to the "scopes" field in this mutation. +func (m *RefreshTokenMutation) AppendedScopes() ([]string, bool) { + if len(m.appendscopes) == 0 { + return nil, false + } + return m.appendscopes, true +} + +// ClearScopes clears the value of the "scopes" field. +func (m *RefreshTokenMutation) ClearScopes() { + m.scopes = nil + m.appendscopes = nil + m.clearedFields[refreshtoken.FieldScopes] = struct{}{} +} + +// ScopesCleared returns if the "scopes" field was cleared in this mutation. +func (m *RefreshTokenMutation) ScopesCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldScopes] + return ok +} + +// ResetScopes resets all changes to the "scopes" field. +func (m *RefreshTokenMutation) ResetScopes() { + m.scopes = nil + m.appendscopes = nil + delete(m.clearedFields, refreshtoken.FieldScopes) +} + +// SetNonce sets the "nonce" field. +func (m *RefreshTokenMutation) SetNonce(s string) { + m.nonce = &s +} + +// Nonce returns the value of the "nonce" field in the mutation. +func (m *RefreshTokenMutation) Nonce() (r string, exists bool) { + v := m.nonce + if v == nil { + return + } + return *v, true +} + +// OldNonce returns the old "nonce" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RefreshTokenMutation) OldNonce(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNonce is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNonce requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNonce: %w", err) + } + return oldValue.Nonce, nil +} + +// ResetNonce resets all changes to the "nonce" field. +func (m *RefreshTokenMutation) ResetNonce() { + m.nonce = nil +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (m *RefreshTokenMutation) SetClaimsUserID(s string) { + m.claims_user_id = &s +} + +// ClaimsUserID returns the value of the "claims_user_id" field in the mutation. +func (m *RefreshTokenMutation) ClaimsUserID() (r string, exists bool) { + v := m.claims_user_id + if v == nil { + return + } + return *v, true +} + +// OldClaimsUserID returns the old "claims_user_id" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsUserID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUserID: %w", err) + } + return oldValue.ClaimsUserID, nil +} + +// ResetClaimsUserID resets all changes to the "claims_user_id" field. +func (m *RefreshTokenMutation) ResetClaimsUserID() { + m.claims_user_id = nil +} + +// SetClaimsUsername sets the "claims_username" field. +func (m *RefreshTokenMutation) SetClaimsUsername(s string) { + m.claims_username = &s +} + +// ClaimsUsername returns the value of the "claims_username" field in the mutation. +func (m *RefreshTokenMutation) ClaimsUsername() (r string, exists bool) { + v := m.claims_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsUsername returns the old "claims_username" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsUsername: %w", err) + } + return oldValue.ClaimsUsername, nil +} + +// ResetClaimsUsername resets all changes to the "claims_username" field. +func (m *RefreshTokenMutation) ResetClaimsUsername() { + m.claims_username = nil +} + +// SetClaimsEmail sets the "claims_email" field. +func (m *RefreshTokenMutation) SetClaimsEmail(s string) { + m.claims_email = &s +} + +// ClaimsEmail returns the value of the "claims_email" field in the mutation. 
+func (m *RefreshTokenMutation) ClaimsEmail() (r string, exists bool) { + v := m.claims_email + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmail returns the old "claims_email" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmail: %w", err) + } + return oldValue.ClaimsEmail, nil +} + +// ResetClaimsEmail resets all changes to the "claims_email" field. +func (m *RefreshTokenMutation) ResetClaimsEmail() { + m.claims_email = nil +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (m *RefreshTokenMutation) SetClaimsEmailVerified(b bool) { + m.claims_email_verified = &b +} + +// ClaimsEmailVerified returns the value of the "claims_email_verified" field in the mutation. +func (m *RefreshTokenMutation) ClaimsEmailVerified() (r bool, exists bool) { + v := m.claims_email_verified + if v == nil { + return + } + return *v, true +} + +// OldClaimsEmailVerified returns the old "claims_email_verified" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsEmailVerified(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsEmailVerified is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsEmailVerified requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsEmailVerified: %w", err) + } + return oldValue.ClaimsEmailVerified, nil +} + +// ResetClaimsEmailVerified resets all changes to the "claims_email_verified" field. +func (m *RefreshTokenMutation) ResetClaimsEmailVerified() { + m.claims_email_verified = nil +} + +// SetClaimsGroups sets the "claims_groups" field. +func (m *RefreshTokenMutation) SetClaimsGroups(s []string) { + m.claims_groups = &s + m.appendclaims_groups = nil +} + +// ClaimsGroups returns the value of the "claims_groups" field in the mutation. +func (m *RefreshTokenMutation) ClaimsGroups() (r []string, exists bool) { + v := m.claims_groups + if v == nil { + return + } + return *v, true +} + +// OldClaimsGroups returns the old "claims_groups" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RefreshTokenMutation) OldClaimsGroups(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsGroups is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsGroups requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsGroups: %w", err) + } + return oldValue.ClaimsGroups, nil +} + +// AppendClaimsGroups adds s to the "claims_groups" field. +func (m *RefreshTokenMutation) AppendClaimsGroups(s []string) { + m.appendclaims_groups = append(m.appendclaims_groups, s...) +} + +// AppendedClaimsGroups returns the list of values that were appended to the "claims_groups" field in this mutation. +func (m *RefreshTokenMutation) AppendedClaimsGroups() ([]string, bool) { + if len(m.appendclaims_groups) == 0 { + return nil, false + } + return m.appendclaims_groups, true +} + +// ClearClaimsGroups clears the value of the "claims_groups" field. +func (m *RefreshTokenMutation) ClearClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + m.clearedFields[refreshtoken.FieldClaimsGroups] = struct{}{} +} + +// ClaimsGroupsCleared returns if the "claims_groups" field was cleared in this mutation. +func (m *RefreshTokenMutation) ClaimsGroupsCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldClaimsGroups] + return ok +} + +// ResetClaimsGroups resets all changes to the "claims_groups" field. +func (m *RefreshTokenMutation) ResetClaimsGroups() { + m.claims_groups = nil + m.appendclaims_groups = nil + delete(m.clearedFields, refreshtoken.FieldClaimsGroups) +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. +func (m *RefreshTokenMutation) SetClaimsPreferredUsername(s string) { + m.claims_preferred_username = &s +} + +// ClaimsPreferredUsername returns the value of the "claims_preferred_username" field in the mutation. +func (m *RefreshTokenMutation) ClaimsPreferredUsername() (r string, exists bool) { + v := m.claims_preferred_username + if v == nil { + return + } + return *v, true +} + +// OldClaimsPreferredUsername returns the old "claims_preferred_username" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldClaimsPreferredUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaimsPreferredUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaimsPreferredUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaimsPreferredUsername: %w", err) + } + return oldValue.ClaimsPreferredUsername, nil +} + +// ResetClaimsPreferredUsername resets all changes to the "claims_preferred_username" field. +func (m *RefreshTokenMutation) ResetClaimsPreferredUsername() { + m.claims_preferred_username = nil +} + +// SetConnectorID sets the "connector_id" field. +func (m *RefreshTokenMutation) SetConnectorID(s string) { + m.connector_id = &s +} + +// ConnectorID returns the value of the "connector_id" field in the mutation. 
+func (m *RefreshTokenMutation) ConnectorID() (r string, exists bool) { + v := m.connector_id + if v == nil { + return + } + return *v, true +} + +// OldConnectorID returns the old "connector_id" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldConnectorID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorID: %w", err) + } + return oldValue.ConnectorID, nil +} + +// ResetConnectorID resets all changes to the "connector_id" field. +func (m *RefreshTokenMutation) ResetConnectorID() { + m.connector_id = nil +} + +// SetConnectorData sets the "connector_data" field. +func (m *RefreshTokenMutation) SetConnectorData(b []byte) { + m.connector_data = &b +} + +// ConnectorData returns the value of the "connector_data" field in the mutation. +func (m *RefreshTokenMutation) ConnectorData() (r []byte, exists bool) { + v := m.connector_data + if v == nil { + return + } + return *v, true +} + +// OldConnectorData returns the old "connector_data" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldConnectorData(ctx context.Context) (v *[]byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConnectorData is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConnectorData requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConnectorData: %w", err) + } + return oldValue.ConnectorData, nil +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (m *RefreshTokenMutation) ClearConnectorData() { + m.connector_data = nil + m.clearedFields[refreshtoken.FieldConnectorData] = struct{}{} +} + +// ConnectorDataCleared returns if the "connector_data" field was cleared in this mutation. +func (m *RefreshTokenMutation) ConnectorDataCleared() bool { + _, ok := m.clearedFields[refreshtoken.FieldConnectorData] + return ok +} + +// ResetConnectorData resets all changes to the "connector_data" field. +func (m *RefreshTokenMutation) ResetConnectorData() { + m.connector_data = nil + delete(m.clearedFields, refreshtoken.FieldConnectorData) +} + +// SetToken sets the "token" field. +func (m *RefreshTokenMutation) SetToken(s string) { + m.token = &s +} + +// Token returns the value of the "token" field in the mutation. +func (m *RefreshTokenMutation) Token() (r string, exists bool) { + v := m.token + if v == nil { + return + } + return *v, true +} + +// OldToken returns the old "token" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RefreshTokenMutation) OldToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil +} + +// ResetToken resets all changes to the "token" field. +func (m *RefreshTokenMutation) ResetToken() { + m.token = nil +} + +// SetObsoleteToken sets the "obsolete_token" field. +func (m *RefreshTokenMutation) SetObsoleteToken(s string) { + m.obsolete_token = &s +} + +// ObsoleteToken returns the value of the "obsolete_token" field in the mutation. +func (m *RefreshTokenMutation) ObsoleteToken() (r string, exists bool) { + v := m.obsolete_token + if v == nil { + return + } + return *v, true +} + +// OldObsoleteToken returns the old "obsolete_token" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldObsoleteToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldObsoleteToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldObsoleteToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldObsoleteToken: %w", err) + } + return oldValue.ObsoleteToken, nil +} + +// ResetObsoleteToken resets all changes to the "obsolete_token" field. +func (m *RefreshTokenMutation) ResetObsoleteToken() { + m.obsolete_token = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *RefreshTokenMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *RefreshTokenMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the RefreshToken entity. +// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RefreshTokenMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *RefreshTokenMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetLastUsed sets the "last_used" field. +func (m *RefreshTokenMutation) SetLastUsed(t time.Time) { + m.last_used = &t +} + +// LastUsed returns the value of the "last_used" field in the mutation. 
+func (m *RefreshTokenMutation) LastUsed() (r time.Time, exists bool) {
+	v := m.last_used
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLastUsed returns the old "last_used" field's value of the RefreshToken entity.
+// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *RefreshTokenMutation) OldLastUsed(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLastUsed is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLastUsed requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLastUsed: %w", err)
+	}
+	return oldValue.LastUsed, nil
+}
+
+// ResetLastUsed resets all changes to the "last_used" field.
+func (m *RefreshTokenMutation) ResetLastUsed() {
+	m.last_used = nil
+}
+
+// Where appends a list of predicates to the RefreshTokenMutation builder.
+func (m *RefreshTokenMutation) Where(ps ...predicate.RefreshToken) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the RefreshTokenMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *RefreshTokenMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.RefreshToken, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *RefreshTokenMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *RefreshTokenMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (RefreshToken).
+func (m *RefreshTokenMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
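+//
+// As a hedged sketch, a hook could inspect every changed field generically
+// (the "log" import and the hook wiring are assumed, not part of this file):
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("refresh token field %s set to %v", name, v)
+//		}
+//	}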
+func (m *RefreshTokenMutation) Fields() []string { + fields := make([]string, 0, 15) + if m.client_id != nil { + fields = append(fields, refreshtoken.FieldClientID) + } + if m.scopes != nil { + fields = append(fields, refreshtoken.FieldScopes) + } + if m.nonce != nil { + fields = append(fields, refreshtoken.FieldNonce) + } + if m.claims_user_id != nil { + fields = append(fields, refreshtoken.FieldClaimsUserID) + } + if m.claims_username != nil { + fields = append(fields, refreshtoken.FieldClaimsUsername) + } + if m.claims_email != nil { + fields = append(fields, refreshtoken.FieldClaimsEmail) + } + if m.claims_email_verified != nil { + fields = append(fields, refreshtoken.FieldClaimsEmailVerified) + } + if m.claims_groups != nil { + fields = append(fields, refreshtoken.FieldClaimsGroups) + } + if m.claims_preferred_username != nil { + fields = append(fields, refreshtoken.FieldClaimsPreferredUsername) + } + if m.connector_id != nil { + fields = append(fields, refreshtoken.FieldConnectorID) + } + if m.connector_data != nil { + fields = append(fields, refreshtoken.FieldConnectorData) + } + if m.token != nil { + fields = append(fields, refreshtoken.FieldToken) + } + if m.obsolete_token != nil { + fields = append(fields, refreshtoken.FieldObsoleteToken) + } + if m.created_at != nil { + fields = append(fields, refreshtoken.FieldCreatedAt) + } + if m.last_used != nil { + fields = append(fields, refreshtoken.FieldLastUsed) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *RefreshTokenMutation) Field(name string) (ent.Value, bool) { + switch name { + case refreshtoken.FieldClientID: + return m.ClientID() + case refreshtoken.FieldScopes: + return m.Scopes() + case refreshtoken.FieldNonce: + return m.Nonce() + case refreshtoken.FieldClaimsUserID: + return m.ClaimsUserID() + case refreshtoken.FieldClaimsUsername: + return m.ClaimsUsername() + case refreshtoken.FieldClaimsEmail: + return m.ClaimsEmail() + case refreshtoken.FieldClaimsEmailVerified: + return m.ClaimsEmailVerified() + case refreshtoken.FieldClaimsGroups: + return m.ClaimsGroups() + case refreshtoken.FieldClaimsPreferredUsername: + return m.ClaimsPreferredUsername() + case refreshtoken.FieldConnectorID: + return m.ConnectorID() + case refreshtoken.FieldConnectorData: + return m.ConnectorData() + case refreshtoken.FieldToken: + return m.Token() + case refreshtoken.FieldObsoleteToken: + return m.ObsoleteToken() + case refreshtoken.FieldCreatedAt: + return m.CreatedAt() + case refreshtoken.FieldLastUsed: + return m.LastUsed() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
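+//
+// For example, an audit hook on an UpdateOne operation might compare old and
+// new values (a sketch; "name" is any changed field key, errors are elided):
+//
+//	oldv, err := m.OldField(ctx, name)
+//	if err == nil {
+//		newv, _ := m.Field(name)
+//		log.Printf("%s: %v -> %v", name, oldv, newv)
+//	}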
+func (m *RefreshTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case refreshtoken.FieldClientID: + return m.OldClientID(ctx) + case refreshtoken.FieldScopes: + return m.OldScopes(ctx) + case refreshtoken.FieldNonce: + return m.OldNonce(ctx) + case refreshtoken.FieldClaimsUserID: + return m.OldClaimsUserID(ctx) + case refreshtoken.FieldClaimsUsername: + return m.OldClaimsUsername(ctx) + case refreshtoken.FieldClaimsEmail: + return m.OldClaimsEmail(ctx) + case refreshtoken.FieldClaimsEmailVerified: + return m.OldClaimsEmailVerified(ctx) + case refreshtoken.FieldClaimsGroups: + return m.OldClaimsGroups(ctx) + case refreshtoken.FieldClaimsPreferredUsername: + return m.OldClaimsPreferredUsername(ctx) + case refreshtoken.FieldConnectorID: + return m.OldConnectorID(ctx) + case refreshtoken.FieldConnectorData: + return m.OldConnectorData(ctx) + case refreshtoken.FieldToken: + return m.OldToken(ctx) + case refreshtoken.FieldObsoleteToken: + return m.OldObsoleteToken(ctx) + case refreshtoken.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case refreshtoken.FieldLastUsed: + return m.OldLastUsed(ctx) + } + return nil, fmt.Errorf("unknown RefreshToken field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RefreshTokenMutation) SetField(name string, value ent.Value) error { + switch name { + case refreshtoken.FieldClientID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClientID(v) + return nil + case refreshtoken.FieldScopes: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScopes(v) + return nil + case refreshtoken.FieldNonce: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNonce(v) + return nil + case refreshtoken.FieldClaimsUserID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUserID(v) + return nil + case refreshtoken.FieldClaimsUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsUsername(v) + return nil + case refreshtoken.FieldClaimsEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmail(v) + return nil + case refreshtoken.FieldClaimsEmailVerified: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsEmailVerified(v) + return nil + case refreshtoken.FieldClaimsGroups: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsGroups(v) + return nil + case refreshtoken.FieldClaimsPreferredUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaimsPreferredUsername(v) + return nil + case refreshtoken.FieldConnectorID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorID(v) + return nil + case refreshtoken.FieldConnectorData: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConnectorData(v) + return nil + case 
refreshtoken.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) + return nil + case refreshtoken.FieldObsoleteToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetObsoleteToken(v) + return nil + case refreshtoken.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case refreshtoken.FieldLastUsed: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastUsed(v) + return nil + } + return fmt.Errorf("unknown RefreshToken field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *RefreshTokenMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *RefreshTokenMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RefreshTokenMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown RefreshToken numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *RefreshTokenMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(refreshtoken.FieldScopes) { + fields = append(fields, refreshtoken.FieldScopes) + } + if m.FieldCleared(refreshtoken.FieldClaimsGroups) { + fields = append(fields, refreshtoken.FieldClaimsGroups) + } + if m.FieldCleared(refreshtoken.FieldConnectorData) { + fields = append(fields, refreshtoken.FieldConnectorData) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *RefreshTokenMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *RefreshTokenMutation) ClearField(name string) error { + switch name { + case refreshtoken.FieldScopes: + m.ClearScopes() + return nil + case refreshtoken.FieldClaimsGroups: + m.ClearClaimsGroups() + return nil + case refreshtoken.FieldConnectorData: + m.ClearConnectorData() + return nil + } + return fmt.Errorf("unknown RefreshToken nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *RefreshTokenMutation) ResetField(name string) error { + switch name { + case refreshtoken.FieldClientID: + m.ResetClientID() + return nil + case refreshtoken.FieldScopes: + m.ResetScopes() + return nil + case refreshtoken.FieldNonce: + m.ResetNonce() + return nil + case refreshtoken.FieldClaimsUserID: + m.ResetClaimsUserID() + return nil + case refreshtoken.FieldClaimsUsername: + m.ResetClaimsUsername() + return nil + case refreshtoken.FieldClaimsEmail: + m.ResetClaimsEmail() + return nil + case refreshtoken.FieldClaimsEmailVerified: + m.ResetClaimsEmailVerified() + return nil + case refreshtoken.FieldClaimsGroups: + m.ResetClaimsGroups() + return nil + case refreshtoken.FieldClaimsPreferredUsername: + m.ResetClaimsPreferredUsername() + return nil + case refreshtoken.FieldConnectorID: + m.ResetConnectorID() + return nil + case refreshtoken.FieldConnectorData: + m.ResetConnectorData() + return nil + case refreshtoken.FieldToken: + m.ResetToken() + return nil + case refreshtoken.FieldObsoleteToken: + m.ResetObsoleteToken() + return nil + case refreshtoken.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case refreshtoken.FieldLastUsed: + m.ResetLastUsed() + return nil + } + return fmt.Errorf("unknown RefreshToken field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *RefreshTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *RefreshTokenMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *RefreshTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *RefreshTokenMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *RefreshTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *RefreshTokenMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *RefreshTokenMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown RefreshToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *RefreshTokenMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown RefreshToken edge %s", name) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client.go new file mode 100644 index 00000000..39a4cf82 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client.go @@ -0,0 +1,165 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/oauth2client" +) + +// OAuth2Client is the model entity for the OAuth2Client schema. 
+type OAuth2Client struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // Secret holds the value of the "secret" field. + Secret string `json:"secret,omitempty"` + // RedirectUris holds the value of the "redirect_uris" field. + RedirectUris []string `json:"redirect_uris,omitempty"` + // TrustedPeers holds the value of the "trusted_peers" field. + TrustedPeers []string `json:"trusted_peers,omitempty"` + // Public holds the value of the "public" field. + Public bool `json:"public,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // LogoURL holds the value of the "logo_url" field. + LogoURL string `json:"logo_url,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*OAuth2Client) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case oauth2client.FieldRedirectUris, oauth2client.FieldTrustedPeers: + values[i] = new([]byte) + case oauth2client.FieldPublic: + values[i] = new(sql.NullBool) + case oauth2client.FieldID, oauth2client.FieldSecret, oauth2client.FieldName, oauth2client.FieldLogoURL: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the OAuth2Client fields. +func (o *OAuth2Client) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case oauth2client.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + o.ID = value.String + } + case oauth2client.FieldSecret: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field secret", values[i]) + } else if value.Valid { + o.Secret = value.String + } + case oauth2client.FieldRedirectUris: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field redirect_uris", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &o.RedirectUris); err != nil { + return fmt.Errorf("unmarshal field redirect_uris: %w", err) + } + } + case oauth2client.FieldTrustedPeers: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field trusted_peers", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &o.TrustedPeers); err != nil { + return fmt.Errorf("unmarshal field trusted_peers: %w", err) + } + } + case oauth2client.FieldPublic: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field public", values[i]) + } else if value.Valid { + o.Public = value.Bool + } + case oauth2client.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + o.Name = value.String + } + case oauth2client.FieldLogoURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field logo_url", values[i]) + } else if value.Valid { + o.LogoURL = value.String + } + default: + o.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the 
ent.Value that was dynamically selected and assigned to the OAuth2Client. +// This includes values selected through modifiers, order, etc. +func (o *OAuth2Client) Value(name string) (ent.Value, error) { + return o.selectValues.Get(name) +} + +// Update returns a builder for updating this OAuth2Client. +// Note that you need to call OAuth2Client.Unwrap() before calling this method if this OAuth2Client +// was returned from a transaction, and the transaction was committed or rolled back. +func (o *OAuth2Client) Update() *OAuth2ClientUpdateOne { + return NewOAuth2ClientClient(o.config).UpdateOne(o) +} + +// Unwrap unwraps the OAuth2Client entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (o *OAuth2Client) Unwrap() *OAuth2Client { + _tx, ok := o.config.driver.(*txDriver) + if !ok { + panic("db: OAuth2Client is not a transactional entity") + } + o.config.driver = _tx.drv + return o +} + +// String implements the fmt.Stringer. +func (o *OAuth2Client) String() string { + var builder strings.Builder + builder.WriteString("OAuth2Client(") + builder.WriteString(fmt.Sprintf("id=%v, ", o.ID)) + builder.WriteString("secret=") + builder.WriteString(o.Secret) + builder.WriteString(", ") + builder.WriteString("redirect_uris=") + builder.WriteString(fmt.Sprintf("%v", o.RedirectUris)) + builder.WriteString(", ") + builder.WriteString("trusted_peers=") + builder.WriteString(fmt.Sprintf("%v", o.TrustedPeers)) + builder.WriteString(", ") + builder.WriteString("public=") + builder.WriteString(fmt.Sprintf("%v", o.Public)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(o.Name) + builder.WriteString(", ") + builder.WriteString("logo_url=") + builder.WriteString(o.LogoURL) + builder.WriteByte(')') + return builder.String() +} + +// OAuth2Clients is a parsable slice of OAuth2Client. +type OAuth2Clients []*OAuth2Client diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/BUILD new file mode 100644 index 00000000..f111f60e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "oauth2client", + srcs = [ + "oauth2client.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client", + importpath = "github.com/dexidp/dex/storage/ent/db/oauth2client", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/oauth2client.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/oauth2client.go new file mode 100644 index 00000000..08df76be --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/oauth2client.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package oauth2client + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the oauth2client type in the database. + Label = "oauth2client" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldSecret holds the string denoting the secret field in the database. + FieldSecret = "secret" + // FieldRedirectUris holds the string denoting the redirect_uris field in the database. 
+ FieldRedirectUris = "redirect_uris" + // FieldTrustedPeers holds the string denoting the trusted_peers field in the database. + FieldTrustedPeers = "trusted_peers" + // FieldPublic holds the string denoting the public field in the database. + FieldPublic = "public" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldLogoURL holds the string denoting the logo_url field in the database. + FieldLogoURL = "logo_url" + // Table holds the table name of the oauth2client in the database. + Table = "oauth2clients" +) + +// Columns holds all SQL columns for oauth2client fields. +var Columns = []string{ + FieldID, + FieldSecret, + FieldRedirectUris, + FieldTrustedPeers, + FieldPublic, + FieldName, + FieldLogoURL, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // SecretValidator is a validator for the "secret" field. It is called by the builders before save. + SecretValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // LogoURLValidator is a validator for the "logo_url" field. It is called by the builders before save. + LogoURLValidator func(string) error + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the OAuth2Client queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// BySecret orders the results by the secret field. +func BySecret(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSecret, opts...).ToFunc() +} + +// ByPublic orders the results by the public field. +func ByPublic(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPublic, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByLogoURL orders the results by the logo_url field. +func ByLogoURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLogoURL, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/where.go new file mode 100644 index 00000000..7cde46b5 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client/where.go @@ -0,0 +1,340 @@ +// Code generated by ent, DO NOT EDIT. + +package oauth2client + +import ( + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContainsFold(FieldID, id)) +} + +// Secret applies equality check predicate on the "secret" field. It's identical to SecretEQ. +func Secret(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldSecret, v)) +} + +// Public applies equality check predicate on the "public" field. It's identical to PublicEQ. +func Public(v bool) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldPublic, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldName, v)) +} + +// LogoURL applies equality check predicate on the "logo_url" field. It's identical to LogoURLEQ. +func LogoURL(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldLogoURL, v)) +} + +// SecretEQ applies the EQ predicate on the "secret" field. +func SecretEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldSecret, v)) +} + +// SecretNEQ applies the NEQ predicate on the "secret" field. +func SecretNEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNEQ(FieldSecret, v)) +} + +// SecretIn applies the In predicate on the "secret" field. +func SecretIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIn(FieldSecret, vs...)) +} + +// SecretNotIn applies the NotIn predicate on the "secret" field. +func SecretNotIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotIn(FieldSecret, vs...)) +} + +// SecretGT applies the GT predicate on the "secret" field. +func SecretGT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGT(FieldSecret, v)) +} + +// SecretGTE applies the GTE predicate on the "secret" field. +func SecretGTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGTE(FieldSecret, v)) +} + +// SecretLT applies the LT predicate on the "secret" field. +func SecretLT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLT(FieldSecret, v)) +} + +// SecretLTE applies the LTE predicate on the "secret" field. 
+func SecretLTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLTE(FieldSecret, v)) +} + +// SecretContains applies the Contains predicate on the "secret" field. +func SecretContains(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContains(FieldSecret, v)) +} + +// SecretHasPrefix applies the HasPrefix predicate on the "secret" field. +func SecretHasPrefix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasPrefix(FieldSecret, v)) +} + +// SecretHasSuffix applies the HasSuffix predicate on the "secret" field. +func SecretHasSuffix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasSuffix(FieldSecret, v)) +} + +// SecretEqualFold applies the EqualFold predicate on the "secret" field. +func SecretEqualFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEqualFold(FieldSecret, v)) +} + +// SecretContainsFold applies the ContainsFold predicate on the "secret" field. +func SecretContainsFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContainsFold(FieldSecret, v)) +} + +// RedirectUrisIsNil applies the IsNil predicate on the "redirect_uris" field. +func RedirectUrisIsNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIsNull(FieldRedirectUris)) +} + +// RedirectUrisNotNil applies the NotNil predicate on the "redirect_uris" field. +func RedirectUrisNotNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotNull(FieldRedirectUris)) +} + +// TrustedPeersIsNil applies the IsNil predicate on the "trusted_peers" field. +func TrustedPeersIsNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIsNull(FieldTrustedPeers)) +} + +// TrustedPeersNotNil applies the NotNil predicate on the "trusted_peers" field. +func TrustedPeersNotNil() predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotNull(FieldTrustedPeers)) +} + +// PublicEQ applies the EQ predicate on the "public" field. +func PublicEQ(v bool) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldPublic, v)) +} + +// PublicNEQ applies the NEQ predicate on the "public" field. +func PublicNEQ(v bool) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNEQ(FieldPublic, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. 
+func NameLT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContainsFold(FieldName, v)) +} + +// LogoURLEQ applies the EQ predicate on the "logo_url" field. +func LogoURLEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEQ(FieldLogoURL, v)) +} + +// LogoURLNEQ applies the NEQ predicate on the "logo_url" field. +func LogoURLNEQ(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNEQ(FieldLogoURL, v)) +} + +// LogoURLIn applies the In predicate on the "logo_url" field. +func LogoURLIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldIn(FieldLogoURL, vs...)) +} + +// LogoURLNotIn applies the NotIn predicate on the "logo_url" field. +func LogoURLNotIn(vs ...string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldNotIn(FieldLogoURL, vs...)) +} + +// LogoURLGT applies the GT predicate on the "logo_url" field. +func LogoURLGT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGT(FieldLogoURL, v)) +} + +// LogoURLGTE applies the GTE predicate on the "logo_url" field. +func LogoURLGTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldGTE(FieldLogoURL, v)) +} + +// LogoURLLT applies the LT predicate on the "logo_url" field. +func LogoURLLT(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLT(FieldLogoURL, v)) +} + +// LogoURLLTE applies the LTE predicate on the "logo_url" field. +func LogoURLLTE(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldLTE(FieldLogoURL, v)) +} + +// LogoURLContains applies the Contains predicate on the "logo_url" field. +func LogoURLContains(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContains(FieldLogoURL, v)) +} + +// LogoURLHasPrefix applies the HasPrefix predicate on the "logo_url" field. +func LogoURLHasPrefix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasPrefix(FieldLogoURL, v)) +} + +// LogoURLHasSuffix applies the HasSuffix predicate on the "logo_url" field. +func LogoURLHasSuffix(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldHasSuffix(FieldLogoURL, v)) +} + +// LogoURLEqualFold applies the EqualFold predicate on the "logo_url" field. 
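+//
+// Predicates in this package compose via And/Or/Not below; a hedged sketch
+// (assumes a *Client named "client"; the URL is a placeholder):
+//
+//	client.OAuth2Client.Query().
+//		Where(oauth2client.And(
+//			oauth2client.PublicEQ(true),
+//			oauth2client.LogoURLEqualFold("https://example.com/logo.png"),
+//		)).
+//		All(ctx)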
+func LogoURLEqualFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldEqualFold(FieldLogoURL, v)) +} + +// LogoURLContainsFold applies the ContainsFold predicate on the "logo_url" field. +func LogoURLContainsFold(v string) predicate.OAuth2Client { + return predicate.OAuth2Client(sql.FieldContainsFold(FieldLogoURL, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.OAuth2Client) predicate.OAuth2Client { + return predicate.OAuth2Client(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.OAuth2Client) predicate.OAuth2Client { + return predicate.OAuth2Client(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.OAuth2Client) predicate.OAuth2Client { + return predicate.OAuth2Client(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_create.go new file mode 100644 index 00000000..f4a47d55 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_create.go @@ -0,0 +1,269 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/oauth2client" +) + +// OAuth2ClientCreate is the builder for creating a OAuth2Client entity. +type OAuth2ClientCreate struct { + config + mutation *OAuth2ClientMutation + hooks []Hook +} + +// SetSecret sets the "secret" field. +func (oc *OAuth2ClientCreate) SetSecret(s string) *OAuth2ClientCreate { + oc.mutation.SetSecret(s) + return oc +} + +// SetRedirectUris sets the "redirect_uris" field. +func (oc *OAuth2ClientCreate) SetRedirectUris(s []string) *OAuth2ClientCreate { + oc.mutation.SetRedirectUris(s) + return oc +} + +// SetTrustedPeers sets the "trusted_peers" field. +func (oc *OAuth2ClientCreate) SetTrustedPeers(s []string) *OAuth2ClientCreate { + oc.mutation.SetTrustedPeers(s) + return oc +} + +// SetPublic sets the "public" field. +func (oc *OAuth2ClientCreate) SetPublic(b bool) *OAuth2ClientCreate { + oc.mutation.SetPublic(b) + return oc +} + +// SetName sets the "name" field. +func (oc *OAuth2ClientCreate) SetName(s string) *OAuth2ClientCreate { + oc.mutation.SetName(s) + return oc +} + +// SetLogoURL sets the "logo_url" field. +func (oc *OAuth2ClientCreate) SetLogoURL(s string) *OAuth2ClientCreate { + oc.mutation.SetLogoURL(s) + return oc +} + +// SetID sets the "id" field. +func (oc *OAuth2ClientCreate) SetID(s string) *OAuth2ClientCreate { + oc.mutation.SetID(s) + return oc +} + +// Mutation returns the OAuth2ClientMutation object of the builder. +func (oc *OAuth2ClientCreate) Mutation() *OAuth2ClientMutation { + return oc.mutation +} + +// Save creates the OAuth2Client in the database. +func (oc *OAuth2ClientCreate) Save(ctx context.Context) (*OAuth2Client, error) { + return withHooks(ctx, oc.sqlSave, oc.mutation, oc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
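+//
+// A typical creation flow ends in SaveX or ExecX (a sketch; every value is a
+// placeholder and a *Client named "client" is assumed):
+//
+//	c := client.OAuth2Client.Create().
+//		SetID("example-app").
+//		SetSecret("placeholder-secret").
+//		SetPublic(false).
+//		SetName("Example App").
+//		SetLogoURL("https://example.com/logo.png").
+//		SaveX(ctx)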
+func (oc *OAuth2ClientCreate) SaveX(ctx context.Context) *OAuth2Client { + v, err := oc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (oc *OAuth2ClientCreate) Exec(ctx context.Context) error { + _, err := oc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (oc *OAuth2ClientCreate) ExecX(ctx context.Context) { + if err := oc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (oc *OAuth2ClientCreate) check() error { + if _, ok := oc.mutation.Secret(); !ok { + return &ValidationError{Name: "secret", err: errors.New(`db: missing required field "OAuth2Client.secret"`)} + } + if v, ok := oc.mutation.Secret(); ok { + if err := oauth2client.SecretValidator(v); err != nil { + return &ValidationError{Name: "secret", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.secret": %w`, err)} + } + } + if _, ok := oc.mutation.Public(); !ok { + return &ValidationError{Name: "public", err: errors.New(`db: missing required field "OAuth2Client.public"`)} + } + if _, ok := oc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`db: missing required field "OAuth2Client.name"`)} + } + if v, ok := oc.mutation.Name(); ok { + if err := oauth2client.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.name": %w`, err)} + } + } + if _, ok := oc.mutation.LogoURL(); !ok { + return &ValidationError{Name: "logo_url", err: errors.New(`db: missing required field "OAuth2Client.logo_url"`)} + } + if v, ok := oc.mutation.LogoURL(); ok { + if err := oauth2client.LogoURLValidator(v); err != nil { + return &ValidationError{Name: "logo_url", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.logo_url": %w`, err)} + } + } + if v, ok := oc.mutation.ID(); ok { + if err := oauth2client.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.id": %w`, err)} + } + } + return nil +} + +func (oc *OAuth2ClientCreate) sqlSave(ctx context.Context) (*OAuth2Client, error) { + if err := oc.check(); err != nil { + return nil, err + } + _node, _spec := oc.createSpec() + if err := sqlgraph.CreateNode(ctx, oc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected OAuth2Client.ID type: %T", _spec.ID.Value) + } + } + oc.mutation.id = &_node.ID + oc.mutation.done = true + return _node, nil +} + +func (oc *OAuth2ClientCreate) createSpec() (*OAuth2Client, *sqlgraph.CreateSpec) { + var ( + _node = &OAuth2Client{config: oc.config} + _spec = sqlgraph.NewCreateSpec(oauth2client.Table, sqlgraph.NewFieldSpec(oauth2client.FieldID, field.TypeString)) + ) + if id, ok := oc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := oc.mutation.Secret(); ok { + _spec.SetField(oauth2client.FieldSecret, field.TypeString, value) + _node.Secret = value + } + if value, ok := oc.mutation.RedirectUris(); ok { + _spec.SetField(oauth2client.FieldRedirectUris, field.TypeJSON, value) + _node.RedirectUris = value + } + if value, ok := oc.mutation.TrustedPeers(); ok { + _spec.SetField(oauth2client.FieldTrustedPeers, field.TypeJSON, value) + _node.TrustedPeers = value + } + 
if value, ok := oc.mutation.Public(); ok {
+		_spec.SetField(oauth2client.FieldPublic, field.TypeBool, value)
+		_node.Public = value
+	}
+	if value, ok := oc.mutation.Name(); ok {
+		_spec.SetField(oauth2client.FieldName, field.TypeString, value)
+		_node.Name = value
+	}
+	if value, ok := oc.mutation.LogoURL(); ok {
+		_spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value)
+		_node.LogoURL = value
+	}
+	return _node, _spec
+}
+
+// OAuth2ClientCreateBulk is the builder for creating many OAuth2Client entities in bulk.
+type OAuth2ClientCreateBulk struct {
+	config
+	builders []*OAuth2ClientCreate
+}
+
+// Save creates the OAuth2Client entities in the database.
+func (ocb *OAuth2ClientCreateBulk) Save(ctx context.Context) ([]*OAuth2Client, error) {
+	specs := make([]*sqlgraph.CreateSpec, len(ocb.builders))
+	nodes := make([]*OAuth2Client, len(ocb.builders))
+	mutators := make([]Mutator, len(ocb.builders))
+	for i := range ocb.builders {
+		func(i int, root context.Context) {
+			builder := ocb.builders[i]
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*OAuth2ClientMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, ocb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, ocb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, ocb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ocb *OAuth2ClientCreateBulk) SaveX(ctx context.Context) []*OAuth2Client {
+	v, err := ocb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (ocb *OAuth2ClientCreateBulk) Exec(ctx context.Context) error {
+	_, err := ocb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ocb *OAuth2ClientCreateBulk) ExecX(ctx context.Context) {
+	if err := ocb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_delete.go
new file mode 100644
index 00000000..ee88e280
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// OAuth2ClientDelete is the builder for deleting a OAuth2Client entity.
+type OAuth2ClientDelete struct {
+	config
+	hooks    []Hook
+	mutation *OAuth2ClientMutation
+}
+
+// Where appends a list of predicates to the OAuth2ClientDelete builder.
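+//
+// e.g. a bulk delete of all public clients (a sketch; "client" is an assumed
+// *Client and error handling is elided):
+//
+//	n, err := client.OAuth2Client.Delete().
+//		Where(oauth2client.PublicEQ(true)).
+//		Exec(ctx)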
+func (od *OAuth2ClientDelete) Where(ps ...predicate.OAuth2Client) *OAuth2ClientDelete {
+	od.mutation.Where(ps...)
+	return od
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (od *OAuth2ClientDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, od.sqlExec, od.mutation, od.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (od *OAuth2ClientDelete) ExecX(ctx context.Context) int {
+	n, err := od.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (od *OAuth2ClientDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(oauth2client.Table, sqlgraph.NewFieldSpec(oauth2client.FieldID, field.TypeString))
+	if ps := od.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, od.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	od.mutation.done = true
+	return affected, err
+}
+
+// OAuth2ClientDeleteOne is the builder for deleting a single OAuth2Client entity.
+type OAuth2ClientDeleteOne struct {
+	od *OAuth2ClientDelete
+}
+
+// Where appends a list of predicates to the OAuth2ClientDelete builder.
+func (odo *OAuth2ClientDeleteOne) Where(ps ...predicate.OAuth2Client) *OAuth2ClientDeleteOne {
+	odo.od.mutation.Where(ps...)
+	return odo
+}
+
+// Exec executes the deletion query.
+func (odo *OAuth2ClientDeleteOne) Exec(ctx context.Context) error {
+	n, err := odo.od.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{oauth2client.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (odo *OAuth2ClientDeleteOne) ExecX(ctx context.Context) {
+	if err := odo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_query.go
new file mode 100644
index 00000000..d2f49ec1
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_query.go
@@ -0,0 +1,526 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// OAuth2ClientQuery is the builder for querying OAuth2Client entities.
+type OAuth2ClientQuery struct {
+	config
+	ctx        *QueryContext
+	order      []oauth2client.OrderOption
+	inters     []Interceptor
+	predicates []predicate.OAuth2Client
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the OAuth2ClientQuery builder.
+func (oq *OAuth2ClientQuery) Where(ps ...predicate.OAuth2Client) *OAuth2ClientQuery {
+	oq.predicates = append(oq.predicates, ps...)
+	return oq
+}
+
+// Limit the number of records to be returned by this query.
+func (oq *OAuth2ClientQuery) Limit(limit int) *OAuth2ClientQuery {
+	oq.ctx.Limit = &limit
+	return oq
+}
+
+// Offset to start from.
+func (oq *OAuth2ClientQuery) Offset(offset int) *OAuth2ClientQuery {
+	oq.ctx.Offset = &offset
+	return oq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method. +func (oq *OAuth2ClientQuery) Unique(unique bool) *OAuth2ClientQuery { + oq.ctx.Unique = &unique + return oq +} + +// Order specifies how the records should be ordered. +func (oq *OAuth2ClientQuery) Order(o ...oauth2client.OrderOption) *OAuth2ClientQuery { + oq.order = append(oq.order, o...) + return oq +} + +// First returns the first OAuth2Client entity from the query. +// Returns a *NotFoundError when no OAuth2Client was found. +func (oq *OAuth2ClientQuery) First(ctx context.Context) (*OAuth2Client, error) { + nodes, err := oq.Limit(1).All(setContextOp(ctx, oq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{oauth2client.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (oq *OAuth2ClientQuery) FirstX(ctx context.Context) *OAuth2Client { + node, err := oq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first OAuth2Client ID from the query. +// Returns a *NotFoundError when no OAuth2Client ID was found. +func (oq *OAuth2ClientQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = oq.Limit(1).IDs(setContextOp(ctx, oq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{oauth2client.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (oq *OAuth2ClientQuery) FirstIDX(ctx context.Context) string { + id, err := oq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single OAuth2Client entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one OAuth2Client entity is found. +// Returns a *NotFoundError when no OAuth2Client entities are found. +func (oq *OAuth2ClientQuery) Only(ctx context.Context) (*OAuth2Client, error) { + nodes, err := oq.Limit(2).All(setContextOp(ctx, oq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{oauth2client.Label} + default: + return nil, &NotSingularError{oauth2client.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (oq *OAuth2ClientQuery) OnlyX(ctx context.Context) *OAuth2Client { + node, err := oq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only OAuth2Client ID in the query. +// Returns a *NotSingularError when more than one OAuth2Client ID is found. +// Returns a *NotFoundError when no entities are found. +func (oq *OAuth2ClientQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = oq.Limit(2).IDs(setContextOp(ctx, oq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{oauth2client.Label} + default: + err = &NotSingularError{oauth2client.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (oq *OAuth2ClientQuery) OnlyIDX(ctx context.Context) string { + id, err := oq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of OAuth2Clients. 
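+//
+// A minimal sketch ("client" and "ctx" are assumed to come from the caller):
+//
+//	clients, err := client.OAuth2Client.Query().
+//		Where(oauth2client.PublicEQ(false)).
+//		All(ctx)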
+func (oq *OAuth2ClientQuery) All(ctx context.Context) ([]*OAuth2Client, error) { + ctx = setContextOp(ctx, oq.ctx, "All") + if err := oq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*OAuth2Client, *OAuth2ClientQuery]() + return withInterceptors[[]*OAuth2Client](ctx, oq, qr, oq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (oq *OAuth2ClientQuery) AllX(ctx context.Context) []*OAuth2Client { + nodes, err := oq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of OAuth2Client IDs. +func (oq *OAuth2ClientQuery) IDs(ctx context.Context) (ids []string, err error) { + if oq.ctx.Unique == nil && oq.path != nil { + oq.Unique(true) + } + ctx = setContextOp(ctx, oq.ctx, "IDs") + if err = oq.Select(oauth2client.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (oq *OAuth2ClientQuery) IDsX(ctx context.Context) []string { + ids, err := oq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (oq *OAuth2ClientQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, oq.ctx, "Count") + if err := oq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, oq, querierCount[*OAuth2ClientQuery](), oq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (oq *OAuth2ClientQuery) CountX(ctx context.Context) int { + count, err := oq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (oq *OAuth2ClientQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, oq.ctx, "Exist") + switch _, err := oq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (oq *OAuth2ClientQuery) ExistX(ctx context.Context) bool { + exist, err := oq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the OAuth2ClientQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (oq *OAuth2ClientQuery) Clone() *OAuth2ClientQuery { + if oq == nil { + return nil + } + return &OAuth2ClientQuery{ + config: oq.config, + ctx: oq.ctx.Clone(), + order: append([]oauth2client.OrderOption{}, oq.order...), + inters: append([]Interceptor{}, oq.inters...), + predicates: append([]predicate.OAuth2Client{}, oq.predicates...), + // clone intermediate query. + sql: oq.sql.Clone(), + path: oq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Secret string `json:"secret,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.OAuth2Client.Query(). +// GroupBy(oauth2client.FieldSecret). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (oq *OAuth2ClientQuery) GroupBy(field string, fields ...string) *OAuth2ClientGroupBy { + oq.ctx.Fields = append([]string{field}, fields...) 
+	grbuild := &OAuth2ClientGroupBy{build: oq}
+	grbuild.flds = &oq.ctx.Fields
+	grbuild.label = oauth2client.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		Secret string `json:"secret,omitempty"`
+//	}
+//
+//	client.OAuth2Client.Query().
+//		Select(oauth2client.FieldSecret).
+//		Scan(ctx, &v)
+func (oq *OAuth2ClientQuery) Select(fields ...string) *OAuth2ClientSelect {
+	oq.ctx.Fields = append(oq.ctx.Fields, fields...)
+	sbuild := &OAuth2ClientSelect{OAuth2ClientQuery: oq}
+	sbuild.label = oauth2client.Label
+	sbuild.flds, sbuild.scan = &oq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a OAuth2ClientSelect configured with the given aggregations.
+func (oq *OAuth2ClientQuery) Aggregate(fns ...AggregateFunc) *OAuth2ClientSelect {
+	return oq.Select().Aggregate(fns...)
+}
+
+func (oq *OAuth2ClientQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range oq.inters {
+		if inter == nil {
+			return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, oq); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range oq.ctx.Fields {
+		if !oauth2client.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+		}
+	}
+	if oq.path != nil {
+		prev, err := oq.path(ctx)
+		if err != nil {
+			return err
+		}
+		oq.sql = prev
+	}
+	return nil
+}
+
+func (oq *OAuth2ClientQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*OAuth2Client, error) {
+	var (
+		nodes = []*OAuth2Client{}
+		_spec = oq.querySpec()
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*OAuth2Client).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &OAuth2Client{config: oq.config}
+		nodes = append(nodes, node)
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, oq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	return nodes, nil
+}
+
+func (oq *OAuth2ClientQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := oq.querySpec()
+	_spec.Node.Columns = oq.ctx.Fields
+	if len(oq.ctx.Fields) > 0 {
+		_spec.Unique = oq.ctx.Unique != nil && *oq.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, oq.driver, _spec)
+}
+
+func (oq *OAuth2ClientQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(oauth2client.Table, oauth2client.Columns, sqlgraph.NewFieldSpec(oauth2client.FieldID, field.TypeString))
+	_spec.From = oq.sql
+	if unique := oq.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if oq.path != nil {
+		_spec.Unique = true
+	}
+	if fields := oq.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, oauth2client.FieldID)
+		for i := range fields {
+			if fields[i] != oauth2client.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := oq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := oq.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := oq.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := oq.order;
len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (oq *OAuth2ClientQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(oq.driver.Dialect()) + t1 := builder.Table(oauth2client.Table) + columns := oq.ctx.Fields + if len(columns) == 0 { + columns = oauth2client.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if oq.sql != nil { + selector = oq.sql + selector.Select(selector.Columns(columns...)...) + } + if oq.ctx.Unique != nil && *oq.ctx.Unique { + selector.Distinct() + } + for _, p := range oq.predicates { + p(selector) + } + for _, p := range oq.order { + p(selector) + } + if offset := oq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := oq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// OAuth2ClientGroupBy is the group-by builder for OAuth2Client entities. +type OAuth2ClientGroupBy struct { + selector + build *OAuth2ClientQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ogb *OAuth2ClientGroupBy) Aggregate(fns ...AggregateFunc) *OAuth2ClientGroupBy { + ogb.fns = append(ogb.fns, fns...) + return ogb +} + +// Scan applies the selector query and scans the result into the given value. +func (ogb *OAuth2ClientGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ogb.build.ctx, "GroupBy") + if err := ogb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*OAuth2ClientQuery, *OAuth2ClientGroupBy](ctx, ogb.build, ogb, ogb.build.inters, v) +} + +func (ogb *OAuth2ClientGroupBy) sqlScan(ctx context.Context, root *OAuth2ClientQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ogb.fns)) + for _, fn := range ogb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ogb.flds)+len(ogb.fns)) + for _, f := range *ogb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ogb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ogb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// OAuth2ClientSelect is the builder for selecting fields of OAuth2Client entities. +type OAuth2ClientSelect struct { + *OAuth2ClientQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (os *OAuth2ClientSelect) Aggregate(fns ...AggregateFunc) *OAuth2ClientSelect { + os.fns = append(os.fns, fns...) + return os +} + +// Scan applies the selector query and scans the result into the given value. 
+func (os *OAuth2ClientSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, os.ctx, "Select")
+	if err := os.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*OAuth2ClientQuery, *OAuth2ClientSelect](ctx, os.OAuth2ClientQuery, os, os.inters, v)
+}
+
+func (os *OAuth2ClientSelect) sqlScan(ctx context.Context, root *OAuth2ClientQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(os.fns))
+	for _, fn := range os.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*os.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := os.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_update.go
new file mode 100644
index 00000000..b70feacc
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/oauth2client_update.go
@@ -0,0 +1,410 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/dialect/sql/sqljson"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// OAuth2ClientUpdate is the builder for updating OAuth2Client entities.
+type OAuth2ClientUpdate struct {
+	config
+	hooks    []Hook
+	mutation *OAuth2ClientMutation
+}
+
+// Where appends a list of predicates to the OAuth2ClientUpdate builder.
+func (ou *OAuth2ClientUpdate) Where(ps ...predicate.OAuth2Client) *OAuth2ClientUpdate {
+	ou.mutation.Where(ps...)
+	return ou
+}
+
+// SetSecret sets the "secret" field.
+func (ou *OAuth2ClientUpdate) SetSecret(s string) *OAuth2ClientUpdate {
+	ou.mutation.SetSecret(s)
+	return ou
+}
+
+// SetRedirectUris sets the "redirect_uris" field.
+func (ou *OAuth2ClientUpdate) SetRedirectUris(s []string) *OAuth2ClientUpdate {
+	ou.mutation.SetRedirectUris(s)
+	return ou
+}
+
+// AppendRedirectUris appends s to the "redirect_uris" field.
+func (ou *OAuth2ClientUpdate) AppendRedirectUris(s []string) *OAuth2ClientUpdate {
+	ou.mutation.AppendRedirectUris(s)
+	return ou
+}
+
+// ClearRedirectUris clears the value of the "redirect_uris" field.
+func (ou *OAuth2ClientUpdate) ClearRedirectUris() *OAuth2ClientUpdate {
+	ou.mutation.ClearRedirectUris()
+	return ou
+}
+
+// SetTrustedPeers sets the "trusted_peers" field.
+func (ou *OAuth2ClientUpdate) SetTrustedPeers(s []string) *OAuth2ClientUpdate {
+	ou.mutation.SetTrustedPeers(s)
+	return ou
+}
+
+// AppendTrustedPeers appends s to the "trusted_peers" field.
+func (ou *OAuth2ClientUpdate) AppendTrustedPeers(s []string) *OAuth2ClientUpdate {
+	ou.mutation.AppendTrustedPeers(s)
+	return ou
+}
+
+// ClearTrustedPeers clears the value of the "trusted_peers" field.
+func (ou *OAuth2ClientUpdate) ClearTrustedPeers() *OAuth2ClientUpdate {
+	ou.mutation.ClearTrustedPeers()
+	return ou
+}
+
+// SetPublic sets the "public" field.
+func (ou *OAuth2ClientUpdate) SetPublic(b bool) *OAuth2ClientUpdate {
+	ou.mutation.SetPublic(b)
+	return ou
+}
+
+// SetName sets the "name" field.
+func (ou *OAuth2ClientUpdate) SetName(s string) *OAuth2ClientUpdate { + ou.mutation.SetName(s) + return ou +} + +// SetLogoURL sets the "logo_url" field. +func (ou *OAuth2ClientUpdate) SetLogoURL(s string) *OAuth2ClientUpdate { + ou.mutation.SetLogoURL(s) + return ou +} + +// Mutation returns the OAuth2ClientMutation object of the builder. +func (ou *OAuth2ClientUpdate) Mutation() *OAuth2ClientMutation { + return ou.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (ou *OAuth2ClientUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, ou.sqlSave, ou.mutation, ou.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ou *OAuth2ClientUpdate) SaveX(ctx context.Context) int { + affected, err := ou.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (ou *OAuth2ClientUpdate) Exec(ctx context.Context) error { + _, err := ou.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ou *OAuth2ClientUpdate) ExecX(ctx context.Context) { + if err := ou.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ou *OAuth2ClientUpdate) check() error { + if v, ok := ou.mutation.Secret(); ok { + if err := oauth2client.SecretValidator(v); err != nil { + return &ValidationError{Name: "secret", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.secret": %w`, err)} + } + } + if v, ok := ou.mutation.Name(); ok { + if err := oauth2client.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.name": %w`, err)} + } + } + if v, ok := ou.mutation.LogoURL(); ok { + if err := oauth2client.LogoURLValidator(v); err != nil { + return &ValidationError{Name: "logo_url", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.logo_url": %w`, err)} + } + } + return nil +} + +func (ou *OAuth2ClientUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := ou.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(oauth2client.Table, oauth2client.Columns, sqlgraph.NewFieldSpec(oauth2client.FieldID, field.TypeString)) + if ps := ou.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ou.mutation.Secret(); ok { + _spec.SetField(oauth2client.FieldSecret, field.TypeString, value) + } + if value, ok := ou.mutation.RedirectUris(); ok { + _spec.SetField(oauth2client.FieldRedirectUris, field.TypeJSON, value) + } + if value, ok := ou.mutation.AppendedRedirectUris(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldRedirectUris, value) + }) + } + if ou.mutation.RedirectUrisCleared() { + _spec.ClearField(oauth2client.FieldRedirectUris, field.TypeJSON) + } + if value, ok := ou.mutation.TrustedPeers(); ok { + _spec.SetField(oauth2client.FieldTrustedPeers, field.TypeJSON, value) + } + if value, ok := ou.mutation.AppendedTrustedPeers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldTrustedPeers, value) + }) + } + if ou.mutation.TrustedPeersCleared() { + _spec.ClearField(oauth2client.FieldTrustedPeers, field.TypeJSON) + } + if value, ok := ou.mutation.Public(); ok { + _spec.SetField(oauth2client.FieldPublic, field.TypeBool, value) + } + if value, ok := ou.mutation.Name(); ok { + 
_spec.SetField(oauth2client.FieldName, field.TypeString, value)
+	}
+	if value, ok := ou.mutation.LogoURL(); ok {
+		_spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, ou.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{oauth2client.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	ou.mutation.done = true
+	return n, nil
+}
+
+// OAuth2ClientUpdateOne is the builder for updating a single OAuth2Client entity.
+type OAuth2ClientUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *OAuth2ClientMutation
+}
+
+// SetSecret sets the "secret" field.
+func (ouo *OAuth2ClientUpdateOne) SetSecret(s string) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetSecret(s)
+	return ouo
+}
+
+// SetRedirectUris sets the "redirect_uris" field.
+func (ouo *OAuth2ClientUpdateOne) SetRedirectUris(s []string) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetRedirectUris(s)
+	return ouo
+}
+
+// AppendRedirectUris appends s to the "redirect_uris" field.
+func (ouo *OAuth2ClientUpdateOne) AppendRedirectUris(s []string) *OAuth2ClientUpdateOne {
+	ouo.mutation.AppendRedirectUris(s)
+	return ouo
+}
+
+// ClearRedirectUris clears the value of the "redirect_uris" field.
+func (ouo *OAuth2ClientUpdateOne) ClearRedirectUris() *OAuth2ClientUpdateOne {
+	ouo.mutation.ClearRedirectUris()
+	return ouo
+}
+
+// SetTrustedPeers sets the "trusted_peers" field.
+func (ouo *OAuth2ClientUpdateOne) SetTrustedPeers(s []string) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetTrustedPeers(s)
+	return ouo
+}
+
+// AppendTrustedPeers appends s to the "trusted_peers" field.
+func (ouo *OAuth2ClientUpdateOne) AppendTrustedPeers(s []string) *OAuth2ClientUpdateOne {
+	ouo.mutation.AppendTrustedPeers(s)
+	return ouo
+}
+
+// ClearTrustedPeers clears the value of the "trusted_peers" field.
+func (ouo *OAuth2ClientUpdateOne) ClearTrustedPeers() *OAuth2ClientUpdateOne {
+	ouo.mutation.ClearTrustedPeers()
+	return ouo
+}
+
+// SetPublic sets the "public" field.
+func (ouo *OAuth2ClientUpdateOne) SetPublic(b bool) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetPublic(b)
+	return ouo
+}
+
+// SetName sets the "name" field.
+func (ouo *OAuth2ClientUpdateOne) SetName(s string) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetName(s)
+	return ouo
+}
+
+// SetLogoURL sets the "logo_url" field.
+func (ouo *OAuth2ClientUpdateOne) SetLogoURL(s string) *OAuth2ClientUpdateOne {
+	ouo.mutation.SetLogoURL(s)
+	return ouo
+}
+
+// Mutation returns the OAuth2ClientMutation object of the builder.
+func (ouo *OAuth2ClientUpdateOne) Mutation() *OAuth2ClientMutation {
+	return ouo.mutation
+}
+
+// Where appends a list of predicates to the OAuth2ClientUpdate builder.
+func (ouo *OAuth2ClientUpdateOne) Where(ps ...predicate.OAuth2Client) *OAuth2ClientUpdateOne {
+	ouo.mutation.Where(ps...)
+	return ouo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (ouo *OAuth2ClientUpdateOne) Select(field string, fields ...string) *OAuth2ClientUpdateOne {
+	ouo.fields = append([]string{field}, fields...)
+	return ouo
+}
+
+// Save executes the query and returns the updated OAuth2Client entity.
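+// A minimal usage sketch (editor's addition, not generated code; assumes an
+// initialized *db.Client named client, and UpdateOneID as provided by the
+// generated OAuth2ClientClient):
+//
+//	updated, err := client.OAuth2Client.UpdateOneID("client-id").
+//		SetName("renamed client").
+//		Save(ctx)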
+func (ouo *OAuth2ClientUpdateOne) Save(ctx context.Context) (*OAuth2Client, error) { + return withHooks(ctx, ouo.sqlSave, ouo.mutation, ouo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ouo *OAuth2ClientUpdateOne) SaveX(ctx context.Context) *OAuth2Client { + node, err := ouo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (ouo *OAuth2ClientUpdateOne) Exec(ctx context.Context) error { + _, err := ouo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ouo *OAuth2ClientUpdateOne) ExecX(ctx context.Context) { + if err := ouo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ouo *OAuth2ClientUpdateOne) check() error { + if v, ok := ouo.mutation.Secret(); ok { + if err := oauth2client.SecretValidator(v); err != nil { + return &ValidationError{Name: "secret", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.secret": %w`, err)} + } + } + if v, ok := ouo.mutation.Name(); ok { + if err := oauth2client.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.name": %w`, err)} + } + } + if v, ok := ouo.mutation.LogoURL(); ok { + if err := oauth2client.LogoURLValidator(v); err != nil { + return &ValidationError{Name: "logo_url", err: fmt.Errorf(`db: validator failed for field "OAuth2Client.logo_url": %w`, err)} + } + } + return nil +} + +func (ouo *OAuth2ClientUpdateOne) sqlSave(ctx context.Context) (_node *OAuth2Client, err error) { + if err := ouo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(oauth2client.Table, oauth2client.Columns, sqlgraph.NewFieldSpec(oauth2client.FieldID, field.TypeString)) + id, ok := ouo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "OAuth2Client.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := ouo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, oauth2client.FieldID) + for _, f := range fields { + if !oauth2client.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != oauth2client.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := ouo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ouo.mutation.Secret(); ok { + _spec.SetField(oauth2client.FieldSecret, field.TypeString, value) + } + if value, ok := ouo.mutation.RedirectUris(); ok { + _spec.SetField(oauth2client.FieldRedirectUris, field.TypeJSON, value) + } + if value, ok := ouo.mutation.AppendedRedirectUris(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldRedirectUris, value) + }) + } + if ouo.mutation.RedirectUrisCleared() { + _spec.ClearField(oauth2client.FieldRedirectUris, field.TypeJSON) + } + if value, ok := ouo.mutation.TrustedPeers(); ok { + _spec.SetField(oauth2client.FieldTrustedPeers, field.TypeJSON, value) + } + if value, ok := ouo.mutation.AppendedTrustedPeers(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, oauth2client.FieldTrustedPeers, value) + }) + } + if ouo.mutation.TrustedPeersCleared() { + _spec.ClearField(oauth2client.FieldTrustedPeers, field.TypeJSON) + } 
+ if value, ok := ouo.mutation.Public(); ok { + _spec.SetField(oauth2client.FieldPublic, field.TypeBool, value) + } + if value, ok := ouo.mutation.Name(); ok { + _spec.SetField(oauth2client.FieldName, field.TypeString, value) + } + if value, ok := ouo.mutation.LogoURL(); ok { + _spec.SetField(oauth2client.FieldLogoURL, field.TypeString, value) + } + _node = &OAuth2Client{config: ouo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, ouo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{oauth2client.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + ouo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession.go new file mode 100644 index 00000000..7adc3afc --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession.go @@ -0,0 +1,138 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/offlinesession" +) + +// OfflineSession is the model entity for the OfflineSession schema. +type OfflineSession struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // UserID holds the value of the "user_id" field. + UserID string `json:"user_id,omitempty"` + // ConnID holds the value of the "conn_id" field. + ConnID string `json:"conn_id,omitempty"` + // Refresh holds the value of the "refresh" field. + Refresh []byte `json:"refresh,omitempty"` + // ConnectorData holds the value of the "connector_data" field. + ConnectorData *[]byte `json:"connector_data,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*OfflineSession) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case offlinesession.FieldRefresh, offlinesession.FieldConnectorData: + values[i] = new([]byte) + case offlinesession.FieldID, offlinesession.FieldUserID, offlinesession.FieldConnID: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the OfflineSession fields. 
+func (os *OfflineSession) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case offlinesession.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + os.ID = value.String + } + case offlinesession.FieldUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + os.UserID = value.String + } + case offlinesession.FieldConnID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field conn_id", values[i]) + } else if value.Valid { + os.ConnID = value.String + } + case offlinesession.FieldRefresh: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field refresh", values[i]) + } else if value != nil { + os.Refresh = *value + } + case offlinesession.FieldConnectorData: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field connector_data", values[i]) + } else if value != nil { + os.ConnectorData = value + } + default: + os.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the OfflineSession. +// This includes values selected through modifiers, order, etc. +func (os *OfflineSession) Value(name string) (ent.Value, error) { + return os.selectValues.Get(name) +} + +// Update returns a builder for updating this OfflineSession. +// Note that you need to call OfflineSession.Unwrap() before calling this method if this OfflineSession +// was returned from a transaction, and the transaction was committed or rolled back. +func (os *OfflineSession) Update() *OfflineSessionUpdateOne { + return NewOfflineSessionClient(os.config).UpdateOne(os) +} + +// Unwrap unwraps the OfflineSession entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (os *OfflineSession) Unwrap() *OfflineSession { + _tx, ok := os.config.driver.(*txDriver) + if !ok { + panic("db: OfflineSession is not a transactional entity") + } + os.config.driver = _tx.drv + return os +} + +// String implements the fmt.Stringer. +func (os *OfflineSession) String() string { + var builder strings.Builder + builder.WriteString("OfflineSession(") + builder.WriteString(fmt.Sprintf("id=%v, ", os.ID)) + builder.WriteString("user_id=") + builder.WriteString(os.UserID) + builder.WriteString(", ") + builder.WriteString("conn_id=") + builder.WriteString(os.ConnID) + builder.WriteString(", ") + builder.WriteString("refresh=") + builder.WriteString(fmt.Sprintf("%v", os.Refresh)) + builder.WriteString(", ") + if v := os.ConnectorData; v != nil { + builder.WriteString("connector_data=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteByte(')') + return builder.String() +} + +// OfflineSessions is a parsable slice of OfflineSession. 
+type OfflineSessions []*OfflineSession diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/BUILD new file mode 100644 index 00000000..fd285bf6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "offlinesession", + srcs = [ + "offlinesession.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession", + importpath = "github.com/dexidp/dex/storage/ent/db/offlinesession", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/offlinesession.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/offlinesession.go new file mode 100644 index 00000000..e7dbc446 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/offlinesession.go @@ -0,0 +1,70 @@ +// Code generated by ent, DO NOT EDIT. + +package offlinesession + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the offlinesession type in the database. + Label = "offline_session" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldConnID holds the string denoting the conn_id field in the database. + FieldConnID = "conn_id" + // FieldRefresh holds the string denoting the refresh field in the database. + FieldRefresh = "refresh" + // FieldConnectorData holds the string denoting the connector_data field in the database. + FieldConnectorData = "connector_data" + // Table holds the table name of the offlinesession in the database. + Table = "offline_sessions" +) + +// Columns holds all SQL columns for offlinesession fields. +var Columns = []string{ + FieldID, + FieldUserID, + FieldConnID, + FieldRefresh, + FieldConnectorData, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + UserIDValidator func(string) error + // ConnIDValidator is a validator for the "conn_id" field. It is called by the builders before save. + ConnIDValidator func(string) error + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the OfflineSession queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByConnID orders the results by the conn_id field. 
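+// An ordering sketch (editor's addition, not generated code; assumes an
+// initialized *db.Client named client, with sql imported from
+// entgo.io/ent/dialect/sql for the order-term options):
+//
+//	sessions, err := client.OfflineSession.Query().
+//		Order(offlinesession.ByConnID(sql.OrderDesc())).
+//		All(ctx)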
+func ByConnID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConnID, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/where.go new file mode 100644 index 00000000..09ba0140 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession/where.go @@ -0,0 +1,335 @@ +// Code generated by ent, DO NOT EDIT. + +package offlinesession + +import ( + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldContainsFold(FieldID, id)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldUserID, v)) +} + +// ConnID applies equality check predicate on the "conn_id" field. It's identical to ConnIDEQ. +func ConnID(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldConnID, v)) +} + +// Refresh applies equality check predicate on the "refresh" field. It's identical to RefreshEQ. +func Refresh(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldRefresh, v)) +} + +// ConnectorData applies equality check predicate on the "connector_data" field. It's identical to ConnectorDataEQ. +func ConnectorData(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldConnectorData, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. 
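+// Predicates in this package compose with the And/Or/Not helpers defined at
+// the end of this file. A sketch (editor's addition, not generated code):
+//
+//	offlinesession.And(
+//		offlinesession.UserIDEQ("user-1"),
+//		offlinesession.ConnIDEQ("local"),
+//	)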
+func UserIDEQ(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotIn(FieldUserID, vs...)) +} + +// UserIDGT applies the GT predicate on the "user_id" field. +func UserIDGT(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGT(FieldUserID, v)) +} + +// UserIDGTE applies the GTE predicate on the "user_id" field. +func UserIDGTE(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGTE(FieldUserID, v)) +} + +// UserIDLT applies the LT predicate on the "user_id" field. +func UserIDLT(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLT(FieldUserID, v)) +} + +// UserIDLTE applies the LTE predicate on the "user_id" field. +func UserIDLTE(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLTE(FieldUserID, v)) +} + +// UserIDContains applies the Contains predicate on the "user_id" field. +func UserIDContains(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldContains(FieldUserID, v)) +} + +// UserIDHasPrefix applies the HasPrefix predicate on the "user_id" field. +func UserIDHasPrefix(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldHasPrefix(FieldUserID, v)) +} + +// UserIDHasSuffix applies the HasSuffix predicate on the "user_id" field. +func UserIDHasSuffix(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldHasSuffix(FieldUserID, v)) +} + +// UserIDEqualFold applies the EqualFold predicate on the "user_id" field. +func UserIDEqualFold(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEqualFold(FieldUserID, v)) +} + +// UserIDContainsFold applies the ContainsFold predicate on the "user_id" field. +func UserIDContainsFold(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldContainsFold(FieldUserID, v)) +} + +// ConnIDEQ applies the EQ predicate on the "conn_id" field. +func ConnIDEQ(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldConnID, v)) +} + +// ConnIDNEQ applies the NEQ predicate on the "conn_id" field. +func ConnIDNEQ(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNEQ(FieldConnID, v)) +} + +// ConnIDIn applies the In predicate on the "conn_id" field. +func ConnIDIn(vs ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIn(FieldConnID, vs...)) +} + +// ConnIDNotIn applies the NotIn predicate on the "conn_id" field. +func ConnIDNotIn(vs ...string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotIn(FieldConnID, vs...)) +} + +// ConnIDGT applies the GT predicate on the "conn_id" field. +func ConnIDGT(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGT(FieldConnID, v)) +} + +// ConnIDGTE applies the GTE predicate on the "conn_id" field. 
+func ConnIDGTE(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGTE(FieldConnID, v)) +} + +// ConnIDLT applies the LT predicate on the "conn_id" field. +func ConnIDLT(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLT(FieldConnID, v)) +} + +// ConnIDLTE applies the LTE predicate on the "conn_id" field. +func ConnIDLTE(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLTE(FieldConnID, v)) +} + +// ConnIDContains applies the Contains predicate on the "conn_id" field. +func ConnIDContains(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldContains(FieldConnID, v)) +} + +// ConnIDHasPrefix applies the HasPrefix predicate on the "conn_id" field. +func ConnIDHasPrefix(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldHasPrefix(FieldConnID, v)) +} + +// ConnIDHasSuffix applies the HasSuffix predicate on the "conn_id" field. +func ConnIDHasSuffix(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldHasSuffix(FieldConnID, v)) +} + +// ConnIDEqualFold applies the EqualFold predicate on the "conn_id" field. +func ConnIDEqualFold(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEqualFold(FieldConnID, v)) +} + +// ConnIDContainsFold applies the ContainsFold predicate on the "conn_id" field. +func ConnIDContainsFold(v string) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldContainsFold(FieldConnID, v)) +} + +// RefreshEQ applies the EQ predicate on the "refresh" field. +func RefreshEQ(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldRefresh, v)) +} + +// RefreshNEQ applies the NEQ predicate on the "refresh" field. +func RefreshNEQ(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNEQ(FieldRefresh, v)) +} + +// RefreshIn applies the In predicate on the "refresh" field. +func RefreshIn(vs ...[]byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIn(FieldRefresh, vs...)) +} + +// RefreshNotIn applies the NotIn predicate on the "refresh" field. +func RefreshNotIn(vs ...[]byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotIn(FieldRefresh, vs...)) +} + +// RefreshGT applies the GT predicate on the "refresh" field. +func RefreshGT(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGT(FieldRefresh, v)) +} + +// RefreshGTE applies the GTE predicate on the "refresh" field. +func RefreshGTE(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGTE(FieldRefresh, v)) +} + +// RefreshLT applies the LT predicate on the "refresh" field. +func RefreshLT(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLT(FieldRefresh, v)) +} + +// RefreshLTE applies the LTE predicate on the "refresh" field. +func RefreshLTE(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLTE(FieldRefresh, v)) +} + +// ConnectorDataEQ applies the EQ predicate on the "connector_data" field. +func ConnectorDataEQ(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldEQ(FieldConnectorData, v)) +} + +// ConnectorDataNEQ applies the NEQ predicate on the "connector_data" field. 
+func ConnectorDataNEQ(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNEQ(FieldConnectorData, v)) +} + +// ConnectorDataIn applies the In predicate on the "connector_data" field. +func ConnectorDataIn(vs ...[]byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIn(FieldConnectorData, vs...)) +} + +// ConnectorDataNotIn applies the NotIn predicate on the "connector_data" field. +func ConnectorDataNotIn(vs ...[]byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotIn(FieldConnectorData, vs...)) +} + +// ConnectorDataGT applies the GT predicate on the "connector_data" field. +func ConnectorDataGT(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGT(FieldConnectorData, v)) +} + +// ConnectorDataGTE applies the GTE predicate on the "connector_data" field. +func ConnectorDataGTE(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldGTE(FieldConnectorData, v)) +} + +// ConnectorDataLT applies the LT predicate on the "connector_data" field. +func ConnectorDataLT(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLT(FieldConnectorData, v)) +} + +// ConnectorDataLTE applies the LTE predicate on the "connector_data" field. +func ConnectorDataLTE(v []byte) predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldLTE(FieldConnectorData, v)) +} + +// ConnectorDataIsNil applies the IsNil predicate on the "connector_data" field. +func ConnectorDataIsNil() predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldIsNull(FieldConnectorData)) +} + +// ConnectorDataNotNil applies the NotNil predicate on the "connector_data" field. +func ConnectorDataNotNil() predicate.OfflineSession { + return predicate.OfflineSession(sql.FieldNotNull(FieldConnectorData)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.OfflineSession) predicate.OfflineSession { + return predicate.OfflineSession(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.OfflineSession) predicate.OfflineSession { + return predicate.OfflineSession(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.OfflineSession) predicate.OfflineSession { + return predicate.OfflineSession(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_create.go new file mode 100644 index 00000000..28e2f502 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_create.go @@ -0,0 +1,241 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/offlinesession" +) + +// OfflineSessionCreate is the builder for creating a OfflineSession entity. +type OfflineSessionCreate struct { + config + mutation *OfflineSessionMutation + hooks []Hook +} + +// SetUserID sets the "user_id" field. 
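+// The setters below are typically chained from the generated client (editor's
+// sketch, not generated code; assumes an initialized *db.Client named client
+// and illustrative field values):
+//
+//	sess, err := client.OfflineSession.Create().
+//		SetID("user-1/local").
+//		SetUserID("user-1").
+//		SetConnID("local").
+//		SetRefresh([]byte("{}")).
+//		Save(ctx)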
+func (osc *OfflineSessionCreate) SetUserID(s string) *OfflineSessionCreate { + osc.mutation.SetUserID(s) + return osc +} + +// SetConnID sets the "conn_id" field. +func (osc *OfflineSessionCreate) SetConnID(s string) *OfflineSessionCreate { + osc.mutation.SetConnID(s) + return osc +} + +// SetRefresh sets the "refresh" field. +func (osc *OfflineSessionCreate) SetRefresh(b []byte) *OfflineSessionCreate { + osc.mutation.SetRefresh(b) + return osc +} + +// SetConnectorData sets the "connector_data" field. +func (osc *OfflineSessionCreate) SetConnectorData(b []byte) *OfflineSessionCreate { + osc.mutation.SetConnectorData(b) + return osc +} + +// SetID sets the "id" field. +func (osc *OfflineSessionCreate) SetID(s string) *OfflineSessionCreate { + osc.mutation.SetID(s) + return osc +} + +// Mutation returns the OfflineSessionMutation object of the builder. +func (osc *OfflineSessionCreate) Mutation() *OfflineSessionMutation { + return osc.mutation +} + +// Save creates the OfflineSession in the database. +func (osc *OfflineSessionCreate) Save(ctx context.Context) (*OfflineSession, error) { + return withHooks(ctx, osc.sqlSave, osc.mutation, osc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (osc *OfflineSessionCreate) SaveX(ctx context.Context) *OfflineSession { + v, err := osc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (osc *OfflineSessionCreate) Exec(ctx context.Context) error { + _, err := osc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (osc *OfflineSessionCreate) ExecX(ctx context.Context) { + if err := osc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (osc *OfflineSessionCreate) check() error { + if _, ok := osc.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`db: missing required field "OfflineSession.user_id"`)} + } + if v, ok := osc.mutation.UserID(); ok { + if err := offlinesession.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.user_id": %w`, err)} + } + } + if _, ok := osc.mutation.ConnID(); !ok { + return &ValidationError{Name: "conn_id", err: errors.New(`db: missing required field "OfflineSession.conn_id"`)} + } + if v, ok := osc.mutation.ConnID(); ok { + if err := offlinesession.ConnIDValidator(v); err != nil { + return &ValidationError{Name: "conn_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.conn_id": %w`, err)} + } + } + if _, ok := osc.mutation.Refresh(); !ok { + return &ValidationError{Name: "refresh", err: errors.New(`db: missing required field "OfflineSession.refresh"`)} + } + if v, ok := osc.mutation.ID(); ok { + if err := offlinesession.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.id": %w`, err)} + } + } + return nil +} + +func (osc *OfflineSessionCreate) sqlSave(ctx context.Context) (*OfflineSession, error) { + if err := osc.check(); err != nil { + return nil, err + } + _node, _spec := osc.createSpec() + if err := sqlgraph.CreateNode(ctx, osc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected 
OfflineSession.ID type: %T", _spec.ID.Value) + } + } + osc.mutation.id = &_node.ID + osc.mutation.done = true + return _node, nil +} + +func (osc *OfflineSessionCreate) createSpec() (*OfflineSession, *sqlgraph.CreateSpec) { + var ( + _node = &OfflineSession{config: osc.config} + _spec = sqlgraph.NewCreateSpec(offlinesession.Table, sqlgraph.NewFieldSpec(offlinesession.FieldID, field.TypeString)) + ) + if id, ok := osc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := osc.mutation.UserID(); ok { + _spec.SetField(offlinesession.FieldUserID, field.TypeString, value) + _node.UserID = value + } + if value, ok := osc.mutation.ConnID(); ok { + _spec.SetField(offlinesession.FieldConnID, field.TypeString, value) + _node.ConnID = value + } + if value, ok := osc.mutation.Refresh(); ok { + _spec.SetField(offlinesession.FieldRefresh, field.TypeBytes, value) + _node.Refresh = value + } + if value, ok := osc.mutation.ConnectorData(); ok { + _spec.SetField(offlinesession.FieldConnectorData, field.TypeBytes, value) + _node.ConnectorData = &value + } + return _node, _spec +} + +// OfflineSessionCreateBulk is the builder for creating many OfflineSession entities in bulk. +type OfflineSessionCreateBulk struct { + config + builders []*OfflineSessionCreate +} + +// Save creates the OfflineSession entities in the database. +func (oscb *OfflineSessionCreateBulk) Save(ctx context.Context) ([]*OfflineSession, error) { + specs := make([]*sqlgraph.CreateSpec, len(oscb.builders)) + nodes := make([]*OfflineSession, len(oscb.builders)) + mutators := make([]Mutator, len(oscb.builders)) + for i := range oscb.builders { + func(i int, root context.Context) { + builder := oscb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*OfflineSessionMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, oscb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, oscb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, oscb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (oscb *OfflineSessionCreateBulk) SaveX(ctx context.Context) []*OfflineSession { + v, err := oscb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (oscb *OfflineSessionCreateBulk) Exec(ctx context.Context) error { + _, err := oscb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
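+// A bulk-create sketch (editor's addition, not generated code; assumes
+// CreateBulk as provided by the generated OfflineSessionClient, with
+// builder1 and builder2 being *OfflineSessionCreate values):
+//
+//	err := client.OfflineSession.CreateBulk(builder1, builder2).Exec(ctx)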
+func (oscb *OfflineSessionCreateBulk) ExecX(ctx context.Context) {
+	if err := oscb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_delete.go
new file mode 100644
index 00000000..354d0e91
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/offlinesession"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// OfflineSessionDelete is the builder for deleting a OfflineSession entity.
+type OfflineSessionDelete struct {
+	config
+	hooks    []Hook
+	mutation *OfflineSessionMutation
+}
+
+// Where appends a list of predicates to the OfflineSessionDelete builder.
+func (osd *OfflineSessionDelete) Where(ps ...predicate.OfflineSession) *OfflineSessionDelete {
+	osd.mutation.Where(ps...)
+	return osd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (osd *OfflineSessionDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, osd.sqlExec, osd.mutation, osd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (osd *OfflineSessionDelete) ExecX(ctx context.Context) int {
+	n, err := osd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (osd *OfflineSessionDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(offlinesession.Table, sqlgraph.NewFieldSpec(offlinesession.FieldID, field.TypeString))
+	if ps := osd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, osd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	osd.mutation.done = true
+	return affected, err
+}
+
+// OfflineSessionDeleteOne is the builder for deleting a single OfflineSession entity.
+type OfflineSessionDeleteOne struct {
+	osd *OfflineSessionDelete
+}
+
+// Where appends a list of predicates to the OfflineSessionDelete builder.
+func (osdo *OfflineSessionDeleteOne) Where(ps ...predicate.OfflineSession) *OfflineSessionDeleteOne {
+	osdo.osd.mutation.Where(ps...)
+	return osdo
+}
+
+// Exec executes the deletion query.
+func (osdo *OfflineSessionDeleteOne) Exec(ctx context.Context) error {
+	n, err := osdo.osd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{offlinesession.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (osdo *OfflineSessionDeleteOne) ExecX(ctx context.Context) {
+	if err := osdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_query.go
new file mode 100644
index 00000000..93bbb916
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_query.go
@@ -0,0 +1,526 @@
+// Code generated by ent, DO NOT EDIT.
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/offlinesession" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// OfflineSessionQuery is the builder for querying OfflineSession entities. +type OfflineSessionQuery struct { + config + ctx *QueryContext + order []offlinesession.OrderOption + inters []Interceptor + predicates []predicate.OfflineSession + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the OfflineSessionQuery builder. +func (osq *OfflineSessionQuery) Where(ps ...predicate.OfflineSession) *OfflineSessionQuery { + osq.predicates = append(osq.predicates, ps...) + return osq +} + +// Limit the number of records to be returned by this query. +func (osq *OfflineSessionQuery) Limit(limit int) *OfflineSessionQuery { + osq.ctx.Limit = &limit + return osq +} + +// Offset to start from. +func (osq *OfflineSessionQuery) Offset(offset int) *OfflineSessionQuery { + osq.ctx.Offset = &offset + return osq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (osq *OfflineSessionQuery) Unique(unique bool) *OfflineSessionQuery { + osq.ctx.Unique = &unique + return osq +} + +// Order specifies how the records should be ordered. +func (osq *OfflineSessionQuery) Order(o ...offlinesession.OrderOption) *OfflineSessionQuery { + osq.order = append(osq.order, o...) + return osq +} + +// First returns the first OfflineSession entity from the query. +// Returns a *NotFoundError when no OfflineSession was found. +func (osq *OfflineSessionQuery) First(ctx context.Context) (*OfflineSession, error) { + nodes, err := osq.Limit(1).All(setContextOp(ctx, osq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{offlinesession.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (osq *OfflineSessionQuery) FirstX(ctx context.Context) *OfflineSession { + node, err := osq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first OfflineSession ID from the query. +// Returns a *NotFoundError when no OfflineSession ID was found. +func (osq *OfflineSessionQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = osq.Limit(1).IDs(setContextOp(ctx, osq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{offlinesession.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (osq *OfflineSessionQuery) FirstIDX(ctx context.Context) string { + id, err := osq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single OfflineSession entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one OfflineSession entity is found. +// Returns a *NotFoundError when no OfflineSession entities are found. 
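+
+// A short sketch of the finder methods above, assuming the generated *Client
+// and a UserID predicate in the offlinesession package (the value is
+// hypothetical); IsNotFound distinguishes "no rows" from real errors:
+//
+//	s, err := client.OfflineSession.Query().
+//		Where(offlinesession.UserID("user-123")).
+//		First(ctx)
+//	if IsNotFound(err) {
+//		// no offline session stored yet
+//	}
+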
+func (osq *OfflineSessionQuery) Only(ctx context.Context) (*OfflineSession, error) { + nodes, err := osq.Limit(2).All(setContextOp(ctx, osq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{offlinesession.Label} + default: + return nil, &NotSingularError{offlinesession.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (osq *OfflineSessionQuery) OnlyX(ctx context.Context) *OfflineSession { + node, err := osq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only OfflineSession ID in the query. +// Returns a *NotSingularError when more than one OfflineSession ID is found. +// Returns a *NotFoundError when no entities are found. +func (osq *OfflineSessionQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = osq.Limit(2).IDs(setContextOp(ctx, osq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{offlinesession.Label} + default: + err = &NotSingularError{offlinesession.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (osq *OfflineSessionQuery) OnlyIDX(ctx context.Context) string { + id, err := osq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of OfflineSessions. +func (osq *OfflineSessionQuery) All(ctx context.Context) ([]*OfflineSession, error) { + ctx = setContextOp(ctx, osq.ctx, "All") + if err := osq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*OfflineSession, *OfflineSessionQuery]() + return withInterceptors[[]*OfflineSession](ctx, osq, qr, osq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (osq *OfflineSessionQuery) AllX(ctx context.Context) []*OfflineSession { + nodes, err := osq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of OfflineSession IDs. +func (osq *OfflineSessionQuery) IDs(ctx context.Context) (ids []string, err error) { + if osq.ctx.Unique == nil && osq.path != nil { + osq.Unique(true) + } + ctx = setContextOp(ctx, osq.ctx, "IDs") + if err = osq.Select(offlinesession.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (osq *OfflineSessionQuery) IDsX(ctx context.Context) []string { + ids, err := osq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (osq *OfflineSessionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, osq.ctx, "Count") + if err := osq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, osq, querierCount[*OfflineSessionQuery](), osq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (osq *OfflineSessionQuery) CountX(ctx context.Context) int { + count, err := osq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
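+
+// Count and All run through the same builder pipeline (interceptors
+// included); a sketch, assuming the generated *Client from this package:
+//
+//	total, err := client.OfflineSession.Query().Count(ctx) // SELECT COUNT(*)
+//	if err != nil { ... }
+//	all, err := client.OfflineSession.Query().All(ctx)     // full entities
+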
+func (osq *OfflineSessionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, osq.ctx, "Exist") + switch _, err := osq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (osq *OfflineSessionQuery) ExistX(ctx context.Context) bool { + exist, err := osq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the OfflineSessionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (osq *OfflineSessionQuery) Clone() *OfflineSessionQuery { + if osq == nil { + return nil + } + return &OfflineSessionQuery{ + config: osq.config, + ctx: osq.ctx.Clone(), + order: append([]offlinesession.OrderOption{}, osq.order...), + inters: append([]Interceptor{}, osq.inters...), + predicates: append([]predicate.OfflineSession{}, osq.predicates...), + // clone intermediate query. + sql: osq.sql.Clone(), + path: osq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// UserID string `json:"user_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.OfflineSession.Query(). +// GroupBy(offlinesession.FieldUserID). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (osq *OfflineSessionQuery) GroupBy(field string, fields ...string) *OfflineSessionGroupBy { + osq.ctx.Fields = append([]string{field}, fields...) + grbuild := &OfflineSessionGroupBy{build: osq} + grbuild.flds = &osq.ctx.Fields + grbuild.label = offlinesession.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// UserID string `json:"user_id,omitempty"` +// } +// +// client.OfflineSession.Query(). +// Select(offlinesession.FieldUserID). +// Scan(ctx, &v) +func (osq *OfflineSessionQuery) Select(fields ...string) *OfflineSessionSelect { + osq.ctx.Fields = append(osq.ctx.Fields, fields...) + sbuild := &OfflineSessionSelect{OfflineSessionQuery: osq} + sbuild.label = offlinesession.Label + sbuild.flds, sbuild.scan = &osq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a OfflineSessionSelect configured with the given aggregations. +func (osq *OfflineSessionQuery) Aggregate(fns ...AggregateFunc) *OfflineSessionSelect { + return osq.Select().Aggregate(fns...) 
+} + +func (osq *OfflineSessionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range osq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, osq); err != nil { + return err + } + } + } + for _, f := range osq.ctx.Fields { + if !offlinesession.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if osq.path != nil { + prev, err := osq.path(ctx) + if err != nil { + return err + } + osq.sql = prev + } + return nil +} + +func (osq *OfflineSessionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*OfflineSession, error) { + var ( + nodes = []*OfflineSession{} + _spec = osq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*OfflineSession).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &OfflineSession{config: osq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, osq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (osq *OfflineSessionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := osq.querySpec() + _spec.Node.Columns = osq.ctx.Fields + if len(osq.ctx.Fields) > 0 { + _spec.Unique = osq.ctx.Unique != nil && *osq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, osq.driver, _spec) +} + +func (osq *OfflineSessionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(offlinesession.Table, offlinesession.Columns, sqlgraph.NewFieldSpec(offlinesession.FieldID, field.TypeString)) + _spec.From = osq.sql + if unique := osq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if osq.path != nil { + _spec.Unique = true + } + if fields := osq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, offlinesession.FieldID) + for i := range fields { + if fields[i] != offlinesession.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := osq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := osq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := osq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := osq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (osq *OfflineSessionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(osq.driver.Dialect()) + t1 := builder.Table(offlinesession.Table) + columns := osq.ctx.Fields + if len(columns) == 0 { + columns = offlinesession.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if osq.sql != nil { + selector = osq.sql + selector.Select(selector.Columns(columns...)...) + } + if osq.ctx.Unique != nil && *osq.ctx.Unique { + selector.Distinct() + } + for _, p := range osq.predicates { + p(selector) + } + for _, p := range osq.order { + p(selector) + } + if offset := osq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
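+		// (Some dialects, e.g. MySQL and SQLite, do not accept OFFSET
+		// without a LIMIT, so math.MaxInt32 serves as an effectively
+		// unbounded default here.)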
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := osq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// OfflineSessionGroupBy is the group-by builder for OfflineSession entities. +type OfflineSessionGroupBy struct { + selector + build *OfflineSessionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (osgb *OfflineSessionGroupBy) Aggregate(fns ...AggregateFunc) *OfflineSessionGroupBy { + osgb.fns = append(osgb.fns, fns...) + return osgb +} + +// Scan applies the selector query and scans the result into the given value. +func (osgb *OfflineSessionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, osgb.build.ctx, "GroupBy") + if err := osgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*OfflineSessionQuery, *OfflineSessionGroupBy](ctx, osgb.build, osgb, osgb.build.inters, v) +} + +func (osgb *OfflineSessionGroupBy) sqlScan(ctx context.Context, root *OfflineSessionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(osgb.fns)) + for _, fn := range osgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*osgb.flds)+len(osgb.fns)) + for _, f := range *osgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*osgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := osgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// OfflineSessionSelect is the builder for selecting fields of OfflineSession entities. +type OfflineSessionSelect struct { + *OfflineSessionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (oss *OfflineSessionSelect) Aggregate(fns ...AggregateFunc) *OfflineSessionSelect { + oss.fns = append(oss.fns, fns...) + return oss +} + +// Scan applies the selector query and scans the result into the given value. +func (oss *OfflineSessionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, oss.ctx, "Select") + if err := oss.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*OfflineSessionQuery, *OfflineSessionSelect](ctx, oss.OfflineSessionQuery, oss, oss.inters, v) +} + +func (oss *OfflineSessionSelect) sqlScan(ctx context.Context, root *OfflineSessionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(oss.fns)) + for _, fn := range oss.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*oss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := oss.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_update.go new file mode 100644 index 00000000..9c5a37c0 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/offlinesession_update.go @@ -0,0 +1,301 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/offlinesession" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// OfflineSessionUpdate is the builder for updating OfflineSession entities. +type OfflineSessionUpdate struct { + config + hooks []Hook + mutation *OfflineSessionMutation +} + +// Where appends a list predicates to the OfflineSessionUpdate builder. +func (osu *OfflineSessionUpdate) Where(ps ...predicate.OfflineSession) *OfflineSessionUpdate { + osu.mutation.Where(ps...) + return osu +} + +// SetUserID sets the "user_id" field. +func (osu *OfflineSessionUpdate) SetUserID(s string) *OfflineSessionUpdate { + osu.mutation.SetUserID(s) + return osu +} + +// SetConnID sets the "conn_id" field. +func (osu *OfflineSessionUpdate) SetConnID(s string) *OfflineSessionUpdate { + osu.mutation.SetConnID(s) + return osu +} + +// SetRefresh sets the "refresh" field. +func (osu *OfflineSessionUpdate) SetRefresh(b []byte) *OfflineSessionUpdate { + osu.mutation.SetRefresh(b) + return osu +} + +// SetConnectorData sets the "connector_data" field. +func (osu *OfflineSessionUpdate) SetConnectorData(b []byte) *OfflineSessionUpdate { + osu.mutation.SetConnectorData(b) + return osu +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (osu *OfflineSessionUpdate) ClearConnectorData() *OfflineSessionUpdate { + osu.mutation.ClearConnectorData() + return osu +} + +// Mutation returns the OfflineSessionMutation object of the builder. +func (osu *OfflineSessionUpdate) Mutation() *OfflineSessionMutation { + return osu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (osu *OfflineSessionUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, osu.sqlSave, osu.mutation, osu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (osu *OfflineSessionUpdate) SaveX(ctx context.Context) int { + affected, err := osu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (osu *OfflineSessionUpdate) Exec(ctx context.Context) error { + _, err := osu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (osu *OfflineSessionUpdate) ExecX(ctx context.Context) { + if err := osu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
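+
+// A sketch of a predicate-scoped update using the setters above, assuming
+// the generated *Client and an offlinesession.UserID predicate (values are
+// hypothetical):
+//
+//	n, err := client.OfflineSession.Update().
+//		Where(offlinesession.UserID("user-123")).
+//		SetRefresh(newRefreshBlob).
+//		Save(ctx) // n is the number of updated rows
+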
+func (osu *OfflineSessionUpdate) check() error { + if v, ok := osu.mutation.UserID(); ok { + if err := offlinesession.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.user_id": %w`, err)} + } + } + if v, ok := osu.mutation.ConnID(); ok { + if err := offlinesession.ConnIDValidator(v); err != nil { + return &ValidationError{Name: "conn_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.conn_id": %w`, err)} + } + } + return nil +} + +func (osu *OfflineSessionUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := osu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(offlinesession.Table, offlinesession.Columns, sqlgraph.NewFieldSpec(offlinesession.FieldID, field.TypeString)) + if ps := osu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := osu.mutation.UserID(); ok { + _spec.SetField(offlinesession.FieldUserID, field.TypeString, value) + } + if value, ok := osu.mutation.ConnID(); ok { + _spec.SetField(offlinesession.FieldConnID, field.TypeString, value) + } + if value, ok := osu.mutation.Refresh(); ok { + _spec.SetField(offlinesession.FieldRefresh, field.TypeBytes, value) + } + if value, ok := osu.mutation.ConnectorData(); ok { + _spec.SetField(offlinesession.FieldConnectorData, field.TypeBytes, value) + } + if osu.mutation.ConnectorDataCleared() { + _spec.ClearField(offlinesession.FieldConnectorData, field.TypeBytes) + } + if n, err = sqlgraph.UpdateNodes(ctx, osu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{offlinesession.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + osu.mutation.done = true + return n, nil +} + +// OfflineSessionUpdateOne is the builder for updating a single OfflineSession entity. +type OfflineSessionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *OfflineSessionMutation +} + +// SetUserID sets the "user_id" field. +func (osuo *OfflineSessionUpdateOne) SetUserID(s string) *OfflineSessionUpdateOne { + osuo.mutation.SetUserID(s) + return osuo +} + +// SetConnID sets the "conn_id" field. +func (osuo *OfflineSessionUpdateOne) SetConnID(s string) *OfflineSessionUpdateOne { + osuo.mutation.SetConnID(s) + return osuo +} + +// SetRefresh sets the "refresh" field. +func (osuo *OfflineSessionUpdateOne) SetRefresh(b []byte) *OfflineSessionUpdateOne { + osuo.mutation.SetRefresh(b) + return osuo +} + +// SetConnectorData sets the "connector_data" field. +func (osuo *OfflineSessionUpdateOne) SetConnectorData(b []byte) *OfflineSessionUpdateOne { + osuo.mutation.SetConnectorData(b) + return osuo +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (osuo *OfflineSessionUpdateOne) ClearConnectorData() *OfflineSessionUpdateOne { + osuo.mutation.ClearConnectorData() + return osuo +} + +// Mutation returns the OfflineSessionMutation object of the builder. +func (osuo *OfflineSessionUpdateOne) Mutation() *OfflineSessionMutation { + return osuo.mutation +} + +// Where appends a list predicates to the OfflineSessionUpdate builder. +func (osuo *OfflineSessionUpdateOne) Where(ps ...predicate.OfflineSession) *OfflineSessionUpdateOne { + osuo.mutation.Where(ps...) 
+ return osuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (osuo *OfflineSessionUpdateOne) Select(field string, fields ...string) *OfflineSessionUpdateOne { + osuo.fields = append([]string{field}, fields...) + return osuo +} + +// Save executes the query and returns the updated OfflineSession entity. +func (osuo *OfflineSessionUpdateOne) Save(ctx context.Context) (*OfflineSession, error) { + return withHooks(ctx, osuo.sqlSave, osuo.mutation, osuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (osuo *OfflineSessionUpdateOne) SaveX(ctx context.Context) *OfflineSession { + node, err := osuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (osuo *OfflineSessionUpdateOne) Exec(ctx context.Context) error { + _, err := osuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (osuo *OfflineSessionUpdateOne) ExecX(ctx context.Context) { + if err := osuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (osuo *OfflineSessionUpdateOne) check() error { + if v, ok := osuo.mutation.UserID(); ok { + if err := offlinesession.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.user_id": %w`, err)} + } + } + if v, ok := osuo.mutation.ConnID(); ok { + if err := offlinesession.ConnIDValidator(v); err != nil { + return &ValidationError{Name: "conn_id", err: fmt.Errorf(`db: validator failed for field "OfflineSession.conn_id": %w`, err)} + } + } + return nil +} + +func (osuo *OfflineSessionUpdateOne) sqlSave(ctx context.Context) (_node *OfflineSession, err error) { + if err := osuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(offlinesession.Table, offlinesession.Columns, sqlgraph.NewFieldSpec(offlinesession.FieldID, field.TypeString)) + id, ok := osuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "OfflineSession.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := osuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, offlinesession.FieldID) + for _, f := range fields { + if !offlinesession.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != offlinesession.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := osuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := osuo.mutation.UserID(); ok { + _spec.SetField(offlinesession.FieldUserID, field.TypeString, value) + } + if value, ok := osuo.mutation.ConnID(); ok { + _spec.SetField(offlinesession.FieldConnID, field.TypeString, value) + } + if value, ok := osuo.mutation.Refresh(); ok { + _spec.SetField(offlinesession.FieldRefresh, field.TypeBytes, value) + } + if value, ok := osuo.mutation.ConnectorData(); ok { + _spec.SetField(offlinesession.FieldConnectorData, field.TypeBytes, value) + } + if osuo.mutation.ConnectorDataCleared() { + _spec.ClearField(offlinesession.FieldConnectorData, field.TypeBytes) + } + _node = &OfflineSession{config: osuo.config} + _spec.Assign = _node.assignValues + 
_spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, osuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{offlinesession.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + osuo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password.go b/vendor/github.com/dexidp/dex/storage/ent/db/password.go new file mode 100644 index 00000000..70f8ad2b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password.go @@ -0,0 +1,138 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/password" +) + +// Password is the model entity for the Password schema. +type Password struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Email holds the value of the "email" field. + Email string `json:"email,omitempty"` + // Hash holds the value of the "hash" field. + Hash []byte `json:"hash,omitempty"` + // Username holds the value of the "username" field. + Username string `json:"username,omitempty"` + // UserID holds the value of the "user_id" field. + UserID string `json:"user_id,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Password) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case password.FieldHash: + values[i] = new([]byte) + case password.FieldID: + values[i] = new(sql.NullInt64) + case password.FieldEmail, password.FieldUsername, password.FieldUserID: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Password fields. +func (pa *Password) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case password.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + pa.ID = int(value.Int64) + case password.FieldEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field email", values[i]) + } else if value.Valid { + pa.Email = value.String + } + case password.FieldHash: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field hash", values[i]) + } else if value != nil { + pa.Hash = *value + } + case password.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + pa.Username = value.String + } + case password.FieldUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + pa.UserID = value.String + } + default: + pa.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Password. +// This includes values selected through modifiers, order, etc. 
+func (pa *Password) Value(name string) (ent.Value, error) { + return pa.selectValues.Get(name) +} + +// Update returns a builder for updating this Password. +// Note that you need to call Password.Unwrap() before calling this method if this Password +// was returned from a transaction, and the transaction was committed or rolled back. +func (pa *Password) Update() *PasswordUpdateOne { + return NewPasswordClient(pa.config).UpdateOne(pa) +} + +// Unwrap unwraps the Password entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (pa *Password) Unwrap() *Password { + _tx, ok := pa.config.driver.(*txDriver) + if !ok { + panic("db: Password is not a transactional entity") + } + pa.config.driver = _tx.drv + return pa +} + +// String implements the fmt.Stringer. +func (pa *Password) String() string { + var builder strings.Builder + builder.WriteString("Password(") + builder.WriteString(fmt.Sprintf("id=%v, ", pa.ID)) + builder.WriteString("email=") + builder.WriteString(pa.Email) + builder.WriteString(", ") + builder.WriteString("hash=") + builder.WriteString(fmt.Sprintf("%v", pa.Hash)) + builder.WriteString(", ") + builder.WriteString("username=") + builder.WriteString(pa.Username) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(pa.UserID) + builder.WriteByte(')') + return builder.String() +} + +// Passwords is a parsable slice of Password. +type Passwords []*Password diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/password/BUILD new file mode 100644 index 00000000..a9715605 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "password", + srcs = [ + "password.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/password", + importpath = "github.com/dexidp/dex/storage/ent/db/password", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password/password.go b/vendor/github.com/dexidp/dex/storage/ent/db/password/password.go new file mode 100644 index 00000000..37ab1e49 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password/password.go @@ -0,0 +1,75 @@ +// Code generated by ent, DO NOT EDIT. + +package password + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the password type in the database. + Label = "password" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldEmail holds the string denoting the email field in the database. + FieldEmail = "email" + // FieldHash holds the string denoting the hash field in the database. + FieldHash = "hash" + // FieldUsername holds the string denoting the username field in the database. + FieldUsername = "username" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // Table holds the table name of the password in the database. + Table = "passwords" +) + +// Columns holds all SQL columns for password fields. 
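+
+// The column constants above and the By* order options defined below combine
+// with the query builder; a sketch, assuming the generated *Client and ent's
+// sql.OrderDesc order-term option:
+//
+//	ps, err := client.Password.Query().
+//		Order(password.ByEmail(sql.OrderDesc())).
+//		All(ctx)
+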
+var Columns = []string{ + FieldID, + FieldEmail, + FieldHash, + FieldUsername, + FieldUserID, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // EmailValidator is a validator for the "email" field. It is called by the builders before save. + EmailValidator func(string) error + // UsernameValidator is a validator for the "username" field. It is called by the builders before save. + UsernameValidator func(string) error + // UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + UserIDValidator func(string) error +) + +// OrderOption defines the ordering options for the Password queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/password/where.go new file mode 100644 index 00000000..718ac151 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password/where.go @@ -0,0 +1,340 @@ +// Code generated by ent, DO NOT EDIT. + +package password + +import ( + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Password { + return predicate.Password(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Password { + return predicate.Password(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Password { + return predicate.Password(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Password { + return predicate.Password(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Password { + return predicate.Password(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Password { + return predicate.Password(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Password { + return predicate.Password(sql.FieldLTE(FieldID, id)) +} + +// Email applies equality check predicate on the "email" field. It's identical to EmailEQ. 
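+
+// Field predicates compose with the And/Or/Not helpers defined later in this
+// file; a sketch, assuming the generated *Client (values are hypothetical):
+//
+//	pw, err := client.Password.Query().
+//		Where(password.Or(
+//			password.EmailEqualFold("Jane@example.com"),
+//			password.UsernameHasPrefix("jane"),
+//		)).
+//		Only(ctx)
+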
+func Email(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldEmail, v)) +} + +// Hash applies equality check predicate on the "hash" field. It's identical to HashEQ. +func Hash(v []byte) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldHash, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. +func Username(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldUsername, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldUserID, v)) +} + +// EmailEQ applies the EQ predicate on the "email" field. +func EmailEQ(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldEmail, v)) +} + +// EmailNEQ applies the NEQ predicate on the "email" field. +func EmailNEQ(v string) predicate.Password { + return predicate.Password(sql.FieldNEQ(FieldEmail, v)) +} + +// EmailIn applies the In predicate on the "email" field. +func EmailIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldIn(FieldEmail, vs...)) +} + +// EmailNotIn applies the NotIn predicate on the "email" field. +func EmailNotIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldNotIn(FieldEmail, vs...)) +} + +// EmailGT applies the GT predicate on the "email" field. +func EmailGT(v string) predicate.Password { + return predicate.Password(sql.FieldGT(FieldEmail, v)) +} + +// EmailGTE applies the GTE predicate on the "email" field. +func EmailGTE(v string) predicate.Password { + return predicate.Password(sql.FieldGTE(FieldEmail, v)) +} + +// EmailLT applies the LT predicate on the "email" field. +func EmailLT(v string) predicate.Password { + return predicate.Password(sql.FieldLT(FieldEmail, v)) +} + +// EmailLTE applies the LTE predicate on the "email" field. +func EmailLTE(v string) predicate.Password { + return predicate.Password(sql.FieldLTE(FieldEmail, v)) +} + +// EmailContains applies the Contains predicate on the "email" field. +func EmailContains(v string) predicate.Password { + return predicate.Password(sql.FieldContains(FieldEmail, v)) +} + +// EmailHasPrefix applies the HasPrefix predicate on the "email" field. +func EmailHasPrefix(v string) predicate.Password { + return predicate.Password(sql.FieldHasPrefix(FieldEmail, v)) +} + +// EmailHasSuffix applies the HasSuffix predicate on the "email" field. +func EmailHasSuffix(v string) predicate.Password { + return predicate.Password(sql.FieldHasSuffix(FieldEmail, v)) +} + +// EmailEqualFold applies the EqualFold predicate on the "email" field. +func EmailEqualFold(v string) predicate.Password { + return predicate.Password(sql.FieldEqualFold(FieldEmail, v)) +} + +// EmailContainsFold applies the ContainsFold predicate on the "email" field. +func EmailContainsFold(v string) predicate.Password { + return predicate.Password(sql.FieldContainsFold(FieldEmail, v)) +} + +// HashEQ applies the EQ predicate on the "hash" field. +func HashEQ(v []byte) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldHash, v)) +} + +// HashNEQ applies the NEQ predicate on the "hash" field. +func HashNEQ(v []byte) predicate.Password { + return predicate.Password(sql.FieldNEQ(FieldHash, v)) +} + +// HashIn applies the In predicate on the "hash" field. 
+func HashIn(vs ...[]byte) predicate.Password { + return predicate.Password(sql.FieldIn(FieldHash, vs...)) +} + +// HashNotIn applies the NotIn predicate on the "hash" field. +func HashNotIn(vs ...[]byte) predicate.Password { + return predicate.Password(sql.FieldNotIn(FieldHash, vs...)) +} + +// HashGT applies the GT predicate on the "hash" field. +func HashGT(v []byte) predicate.Password { + return predicate.Password(sql.FieldGT(FieldHash, v)) +} + +// HashGTE applies the GTE predicate on the "hash" field. +func HashGTE(v []byte) predicate.Password { + return predicate.Password(sql.FieldGTE(FieldHash, v)) +} + +// HashLT applies the LT predicate on the "hash" field. +func HashLT(v []byte) predicate.Password { + return predicate.Password(sql.FieldLT(FieldHash, v)) +} + +// HashLTE applies the LTE predicate on the "hash" field. +func HashLTE(v []byte) predicate.Password { + return predicate.Password(sql.FieldLTE(FieldHash, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. +func UsernameEQ(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.Password { + return predicate.Password(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.Password { + return predicate.Password(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.Password { + return predicate.Password(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.Password { + return predicate.Password(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.Password { + return predicate.Password(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.Password { + return predicate.Password(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.Password { + return predicate.Password(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.Password { + return predicate.Password(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. +func UsernameEqualFold(v string) predicate.Password { + return predicate.Password(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.Password { + return predicate.Password(sql.FieldContainsFold(FieldUsername, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. 
+func UserIDEQ(v string) predicate.Password { + return predicate.Password(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v string) predicate.Password { + return predicate.Password(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...string) predicate.Password { + return predicate.Password(sql.FieldNotIn(FieldUserID, vs...)) +} + +// UserIDGT applies the GT predicate on the "user_id" field. +func UserIDGT(v string) predicate.Password { + return predicate.Password(sql.FieldGT(FieldUserID, v)) +} + +// UserIDGTE applies the GTE predicate on the "user_id" field. +func UserIDGTE(v string) predicate.Password { + return predicate.Password(sql.FieldGTE(FieldUserID, v)) +} + +// UserIDLT applies the LT predicate on the "user_id" field. +func UserIDLT(v string) predicate.Password { + return predicate.Password(sql.FieldLT(FieldUserID, v)) +} + +// UserIDLTE applies the LTE predicate on the "user_id" field. +func UserIDLTE(v string) predicate.Password { + return predicate.Password(sql.FieldLTE(FieldUserID, v)) +} + +// UserIDContains applies the Contains predicate on the "user_id" field. +func UserIDContains(v string) predicate.Password { + return predicate.Password(sql.FieldContains(FieldUserID, v)) +} + +// UserIDHasPrefix applies the HasPrefix predicate on the "user_id" field. +func UserIDHasPrefix(v string) predicate.Password { + return predicate.Password(sql.FieldHasPrefix(FieldUserID, v)) +} + +// UserIDHasSuffix applies the HasSuffix predicate on the "user_id" field. +func UserIDHasSuffix(v string) predicate.Password { + return predicate.Password(sql.FieldHasSuffix(FieldUserID, v)) +} + +// UserIDEqualFold applies the EqualFold predicate on the "user_id" field. +func UserIDEqualFold(v string) predicate.Password { + return predicate.Password(sql.FieldEqualFold(FieldUserID, v)) +} + +// UserIDContainsFold applies the ContainsFold predicate on the "user_id" field. +func UserIDContainsFold(v string) predicate.Password { + return predicate.Password(sql.FieldContainsFold(FieldUserID, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Password) predicate.Password { + return predicate.Password(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Password) predicate.Password { + return predicate.Password(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Password) predicate.Password { + return predicate.Password(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/password_create.go new file mode 100644 index 00000000..a6b83d2f --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password_create.go @@ -0,0 +1,233 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/password" +) + +// PasswordCreate is the builder for creating a Password entity. +type PasswordCreate struct { + config + mutation *PasswordMutation + hooks []Hook +} + +// SetEmail sets the "email" field. +func (pc *PasswordCreate) SetEmail(s string) *PasswordCreate { + pc.mutation.SetEmail(s) + return pc +} + +// SetHash sets the "hash" field. +func (pc *PasswordCreate) SetHash(b []byte) *PasswordCreate { + pc.mutation.SetHash(b) + return pc +} + +// SetUsername sets the "username" field. +func (pc *PasswordCreate) SetUsername(s string) *PasswordCreate { + pc.mutation.SetUsername(s) + return pc +} + +// SetUserID sets the "user_id" field. +func (pc *PasswordCreate) SetUserID(s string) *PasswordCreate { + pc.mutation.SetUserID(s) + return pc +} + +// Mutation returns the PasswordMutation object of the builder. +func (pc *PasswordCreate) Mutation() *PasswordMutation { + return pc.mutation +} + +// Save creates the Password in the database. +func (pc *PasswordCreate) Save(ctx context.Context) (*Password, error) { + return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pc *PasswordCreate) SaveX(ctx context.Context) *Password { + v, err := pc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pc *PasswordCreate) Exec(ctx context.Context) error { + _, err := pc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pc *PasswordCreate) ExecX(ctx context.Context) { + if err := pc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
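+
+// A minimal create sketch; check() below enforces that all four fields are
+// set before the INSERT is issued (the client and values are hypothetical):
+//
+//	pw, err := client.Password.Create().
+//		SetEmail("jane@example.com").
+//		SetUsername("jane").
+//		SetUserID("user-123").
+//		SetHash(bcryptHash).
+//		Save(ctx)
+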
+func (pc *PasswordCreate) check() error { + if _, ok := pc.mutation.Email(); !ok { + return &ValidationError{Name: "email", err: errors.New(`db: missing required field "Password.email"`)} + } + if v, ok := pc.mutation.Email(); ok { + if err := password.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`db: validator failed for field "Password.email": %w`, err)} + } + } + if _, ok := pc.mutation.Hash(); !ok { + return &ValidationError{Name: "hash", err: errors.New(`db: missing required field "Password.hash"`)} + } + if _, ok := pc.mutation.Username(); !ok { + return &ValidationError{Name: "username", err: errors.New(`db: missing required field "Password.username"`)} + } + if v, ok := pc.mutation.Username(); ok { + if err := password.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`db: validator failed for field "Password.username": %w`, err)} + } + } + if _, ok := pc.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`db: missing required field "Password.user_id"`)} + } + if v, ok := pc.mutation.UserID(); ok { + if err := password.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "Password.user_id": %w`, err)} + } + } + return nil +} + +func (pc *PasswordCreate) sqlSave(ctx context.Context) (*Password, error) { + if err := pc.check(); err != nil { + return nil, err + } + _node, _spec := pc.createSpec() + if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + pc.mutation.id = &_node.ID + pc.mutation.done = true + return _node, nil +} + +func (pc *PasswordCreate) createSpec() (*Password, *sqlgraph.CreateSpec) { + var ( + _node = &Password{config: pc.config} + _spec = sqlgraph.NewCreateSpec(password.Table, sqlgraph.NewFieldSpec(password.FieldID, field.TypeInt)) + ) + if value, ok := pc.mutation.Email(); ok { + _spec.SetField(password.FieldEmail, field.TypeString, value) + _node.Email = value + } + if value, ok := pc.mutation.Hash(); ok { + _spec.SetField(password.FieldHash, field.TypeBytes, value) + _node.Hash = value + } + if value, ok := pc.mutation.Username(); ok { + _spec.SetField(password.FieldUsername, field.TypeString, value) + _node.Username = value + } + if value, ok := pc.mutation.UserID(); ok { + _spec.SetField(password.FieldUserID, field.TypeString, value) + _node.UserID = value + } + return _node, _spec +} + +// PasswordCreateBulk is the builder for creating many Password entities in bulk. +type PasswordCreateBulk struct { + config + builders []*PasswordCreate +} + +// Save creates the Password entities in the database. 
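+
+// Unlike OfflineSession, whose string IDs are supplied by the caller,
+// Password uses auto-increment integer IDs: after the batch INSERT below,
+// each driver-assigned ID is read back from specs[i].ID.Value and copied
+// onto the corresponding node.
+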
+func (pcb *PasswordCreateBulk) Save(ctx context.Context) ([]*Password, error) { + specs := make([]*sqlgraph.CreateSpec, len(pcb.builders)) + nodes := make([]*Password, len(pcb.builders)) + mutators := make([]Mutator, len(pcb.builders)) + for i := range pcb.builders { + func(i int, root context.Context) { + builder := pcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PasswordMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (pcb *PasswordCreateBulk) SaveX(ctx context.Context) []*Password { + v, err := pcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pcb *PasswordCreateBulk) Exec(ctx context.Context) error { + _, err := pcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcb *PasswordCreateBulk) ExecX(ctx context.Context) { + if err := pcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/password_delete.go new file mode 100644 index 00000000..784d545e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/password" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// PasswordDelete is the builder for deleting a Password entity. +type PasswordDelete struct { + config + hooks []Hook + mutation *PasswordMutation +} + +// Where appends a list predicates to the PasswordDelete builder. +func (pd *PasswordDelete) Where(ps ...predicate.Password) *PasswordDelete { + pd.mutation.Where(ps...) + return pd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (pd *PasswordDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pd *PasswordDelete) ExecX(ctx context.Context) int { + n, err := pd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (pd *PasswordDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(password.Table, sqlgraph.NewFieldSpec(password.FieldID, field.TypeInt)) + if ps := pd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pd.mutation.done = true + return affected, err +} + +// PasswordDeleteOne is the builder for deleting a single Password entity. +type PasswordDeleteOne struct { + pd *PasswordDelete +} + +// Where appends a list predicates to the PasswordDelete builder. +func (pdo *PasswordDeleteOne) Where(ps ...predicate.Password) *PasswordDeleteOne { + pdo.pd.mutation.Where(ps...) + return pdo +} + +// Exec executes the deletion query. +func (pdo *PasswordDeleteOne) Exec(ctx context.Context) error { + n, err := pdo.pd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{password.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (pdo *PasswordDeleteOne) ExecX(ctx context.Context) { + if err := pdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/password_query.go new file mode 100644 index 00000000..6f5ff263 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/password" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// PasswordQuery is the builder for querying Password entities. +type PasswordQuery struct { + config + ctx *QueryContext + order []password.OrderOption + inters []Interceptor + predicates []predicate.Password + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PasswordQuery builder. +func (pq *PasswordQuery) Where(ps ...predicate.Password) *PasswordQuery { + pq.predicates = append(pq.predicates, ps...) + return pq +} + +// Limit the number of records to be returned by this query. +func (pq *PasswordQuery) Limit(limit int) *PasswordQuery { + pq.ctx.Limit = &limit + return pq +} + +// Offset to start from. +func (pq *PasswordQuery) Offset(offset int) *PasswordQuery { + pq.ctx.Offset = &offset + return pq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (pq *PasswordQuery) Unique(unique bool) *PasswordQuery { + pq.ctx.Unique = &unique + return pq +} + +// Order specifies how the records should be ordered. +func (pq *PasswordQuery) Order(o ...password.OrderOption) *PasswordQuery { + pq.order = append(pq.order, o...) + return pq +} + +// First returns the first Password entity from the query. +// Returns a *NotFoundError when no Password was found. 
+func (pq *PasswordQuery) First(ctx context.Context) (*Password, error) { + nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{password.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pq *PasswordQuery) FirstX(ctx context.Context) *Password { + node, err := pq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Password ID from the query. +// Returns a *NotFoundError when no Password ID was found. +func (pq *PasswordQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{password.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (pq *PasswordQuery) FirstIDX(ctx context.Context) int { + id, err := pq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Password entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Password entity is found. +// Returns a *NotFoundError when no Password entities are found. +func (pq *PasswordQuery) Only(ctx context.Context) (*Password, error) { + nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{password.Label} + default: + return nil, &NotSingularError{password.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pq *PasswordQuery) OnlyX(ctx context.Context) *Password { + node, err := pq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Password ID in the query. +// Returns a *NotSingularError when more than one Password ID is found. +// Returns a *NotFoundError when no entities are found. +func (pq *PasswordQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{password.Label} + default: + err = &NotSingularError{password.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (pq *PasswordQuery) OnlyIDX(ctx context.Context) int { + id, err := pq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Passwords. +func (pq *PasswordQuery) All(ctx context.Context) ([]*Password, error) { + ctx = setContextOp(ctx, pq.ctx, "All") + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Password, *PasswordQuery]() + return withInterceptors[[]*Password](ctx, pq, qr, pq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pq *PasswordQuery) AllX(ctx context.Context) []*Password { + nodes, err := pq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Password IDs. 
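+//
+// Example (a sketch; client is an assumed *Client value, not defined here):
+//
+//	ids, err := client.Password.Query().IDs(ctx)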
+func (pq *PasswordQuery) IDs(ctx context.Context) (ids []int, err error) {
+	if pq.ctx.Unique == nil && pq.path != nil {
+		pq.Unique(true)
+	}
+	ctx = setContextOp(ctx, pq.ctx, "IDs")
+	if err = pq.Select(password.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (pq *PasswordQuery) IDsX(ctx context.Context) []int {
+	ids, err := pq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (pq *PasswordQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, pq.ctx, "Count")
+	if err := pq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, pq, querierCount[*PasswordQuery](), pq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (pq *PasswordQuery) CountX(ctx context.Context) int {
+	count, err := pq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (pq *PasswordQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, pq.ctx, "Exist")
+	switch _, err := pq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("db: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (pq *PasswordQuery) ExistX(ctx context.Context) bool {
+	exist, err := pq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the PasswordQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (pq *PasswordQuery) Clone() *PasswordQuery {
+	if pq == nil {
+		return nil
+	}
+	return &PasswordQuery{
+		config:     pq.config,
+		ctx:        pq.ctx.Clone(),
+		order:      append([]password.OrderOption{}, pq.order...),
+		inters:     append([]Interceptor{}, pq.inters...),
+		predicates: append([]predicate.Password{}, pq.predicates...),
+		// clone intermediate query.
+		sql:  pq.sql.Clone(),
+		path: pq.path,
+	}
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		Email string `json:"email,omitempty"`
+//		Count int    `json:"count,omitempty"`
+//	}
+//
+//	client.Password.Query().
+//		GroupBy(password.FieldEmail).
+//		Aggregate(db.Count()).
+//		Scan(ctx, &v)
+func (pq *PasswordQuery) GroupBy(field string, fields ...string) *PasswordGroupBy {
+	pq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &PasswordGroupBy{build: pq}
+	grbuild.flds = &pq.ctx.Fields
+	grbuild.label = password.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		Email string `json:"email,omitempty"`
+//	}
+//
+//	client.Password.Query().
+//		Select(password.FieldEmail).
+//		Scan(ctx, &v)
+func (pq *PasswordQuery) Select(fields ...string) *PasswordSelect {
+	pq.ctx.Fields = append(pq.ctx.Fields, fields...)
+	sbuild := &PasswordSelect{PasswordQuery: pq}
+	sbuild.label = password.Label
+	sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a PasswordSelect configured with the given aggregations.
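+//
+// Example (a sketch; client is an assumed *Client value, and Int is the
+// generic scanner provided by the embedded selector type):
+//
+//	n, err := client.Password.Query().
+//		Aggregate(db.Count()).
+//		Int(ctx)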
+func (pq *PasswordQuery) Aggregate(fns ...AggregateFunc) *PasswordSelect { + return pq.Select().Aggregate(fns...) +} + +func (pq *PasswordQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pq); err != nil { + return err + } + } + } + for _, f := range pq.ctx.Fields { + if !password.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if pq.path != nil { + prev, err := pq.path(ctx) + if err != nil { + return err + } + pq.sql = prev + } + return nil +} + +func (pq *PasswordQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Password, error) { + var ( + nodes = []*Password{} + _spec = pq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Password).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Password{config: pq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (pq *PasswordQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pq.querySpec() + _spec.Node.Columns = pq.ctx.Fields + if len(pq.ctx.Fields) > 0 { + _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pq.driver, _spec) +} + +func (pq *PasswordQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(password.Table, password.Columns, sqlgraph.NewFieldSpec(password.FieldID, field.TypeInt)) + _spec.From = pq.sql + if unique := pq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pq.path != nil { + _spec.Unique = true + } + if fields := pq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, password.FieldID) + for i := range fields { + if fields[i] != password.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := pq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pq *PasswordQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pq.driver.Dialect()) + t1 := builder.Table(password.Table) + columns := pq.ctx.Fields + if len(columns) == 0 { + columns = password.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pq.sql != nil { + selector = pq.sql + selector.Select(selector.Columns(columns...)...) + } + if pq.ctx.Unique != nil && *pq.ctx.Unique { + selector.Distinct() + } + for _, p := range pq.predicates { + p(selector) + } + for _, p := range pq.order { + p(selector) + } + if offset := pq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
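+		// (Some dialects, e.g. MySQL, reject OFFSET without LIMIT, so
+		// math.MaxInt32 serves as an effectively unbounded default.)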
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// PasswordGroupBy is the group-by builder for Password entities. +type PasswordGroupBy struct { + selector + build *PasswordQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pgb *PasswordGroupBy) Aggregate(fns ...AggregateFunc) *PasswordGroupBy { + pgb.fns = append(pgb.fns, fns...) + return pgb +} + +// Scan applies the selector query and scans the result into the given value. +func (pgb *PasswordGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pgb.build.ctx, "GroupBy") + if err := pgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PasswordQuery, *PasswordGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) +} + +func (pgb *PasswordGroupBy) sqlScan(ctx context.Context, root *PasswordQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pgb.fns)) + for _, fn := range pgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) + for _, f := range *pgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PasswordSelect is the builder for selecting fields of Password entities. +type PasswordSelect struct { + *PasswordQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ps *PasswordSelect) Aggregate(fns ...AggregateFunc) *PasswordSelect { + ps.fns = append(ps.fns, fns...) + return ps +} + +// Scan applies the selector query and scans the result into the given value. +func (ps *PasswordSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ps.ctx, "Select") + if err := ps.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PasswordQuery, *PasswordSelect](ctx, ps.PasswordQuery, ps, ps.inters, v) +} + +func (ps *PasswordSelect) sqlScan(ctx context.Context, root *PasswordQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ps.fns)) + for _, fn := range ps.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ps.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/password_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/password_update.go new file mode 100644 index 00000000..f7855dc7 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/password_update.go @@ -0,0 +1,293 @@ +// Code generated by ent, DO NOT EDIT. 
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/password"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+)
+
+// PasswordUpdate is the builder for updating Password entities.
+type PasswordUpdate struct {
+	config
+	hooks    []Hook
+	mutation *PasswordMutation
+}
+
+// Where appends a list of predicates to the PasswordUpdate builder.
+func (pu *PasswordUpdate) Where(ps ...predicate.Password) *PasswordUpdate {
+	pu.mutation.Where(ps...)
+	return pu
+}
+
+// SetEmail sets the "email" field.
+func (pu *PasswordUpdate) SetEmail(s string) *PasswordUpdate {
+	pu.mutation.SetEmail(s)
+	return pu
+}
+
+// SetHash sets the "hash" field.
+func (pu *PasswordUpdate) SetHash(b []byte) *PasswordUpdate {
+	pu.mutation.SetHash(b)
+	return pu
+}
+
+// SetUsername sets the "username" field.
+func (pu *PasswordUpdate) SetUsername(s string) *PasswordUpdate {
+	pu.mutation.SetUsername(s)
+	return pu
+}
+
+// SetUserID sets the "user_id" field.
+func (pu *PasswordUpdate) SetUserID(s string) *PasswordUpdate {
+	pu.mutation.SetUserID(s)
+	return pu
+}
+
+// Mutation returns the PasswordMutation object of the builder.
+func (pu *PasswordUpdate) Mutation() *PasswordMutation {
+	return pu.mutation
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (pu *PasswordUpdate) Save(ctx context.Context) (int, error) {
+	return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (pu *PasswordUpdate) SaveX(ctx context.Context) int {
+	affected, err := pu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (pu *PasswordUpdate) Exec(ctx context.Context) error {
+	_, err := pu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pu *PasswordUpdate) ExecX(ctx context.Context) {
+	if err := pu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
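+// The per-field validators used below (e.g. password.EmailValidator) are
+// package-level variables that ent stitches in from the schema definition via
+// this package's generated runtime wiring.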
+func (pu *PasswordUpdate) check() error {
+	if v, ok := pu.mutation.Email(); ok {
+		if err := password.EmailValidator(v); err != nil {
+			return &ValidationError{Name: "email", err: fmt.Errorf(`db: validator failed for field "Password.email": %w`, err)}
+		}
+	}
+	if v, ok := pu.mutation.Username(); ok {
+		if err := password.UsernameValidator(v); err != nil {
+			return &ValidationError{Name: "username", err: fmt.Errorf(`db: validator failed for field "Password.username": %w`, err)}
+		}
+	}
+	if v, ok := pu.mutation.UserID(); ok {
+		if err := password.UserIDValidator(v); err != nil {
+			return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "Password.user_id": %w`, err)}
+		}
+	}
+	return nil
+}
+
+func (pu *PasswordUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	if err := pu.check(); err != nil {
+		return n, err
+	}
+	_spec := sqlgraph.NewUpdateSpec(password.Table, password.Columns, sqlgraph.NewFieldSpec(password.FieldID, field.TypeInt))
+	if ps := pu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := pu.mutation.Email(); ok {
+		_spec.SetField(password.FieldEmail, field.TypeString, value)
+	}
+	if value, ok := pu.mutation.Hash(); ok {
+		_spec.SetField(password.FieldHash, field.TypeBytes, value)
+	}
+	if value, ok := pu.mutation.Username(); ok {
+		_spec.SetField(password.FieldUsername, field.TypeString, value)
+	}
+	if value, ok := pu.mutation.UserID(); ok {
+		_spec.SetField(password.FieldUserID, field.TypeString, value)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{password.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	pu.mutation.done = true
+	return n, nil
+}
+
+// PasswordUpdateOne is the builder for updating a single Password entity.
+type PasswordUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *PasswordMutation
+}
+
+// SetEmail sets the "email" field.
+func (puo *PasswordUpdateOne) SetEmail(s string) *PasswordUpdateOne {
+	puo.mutation.SetEmail(s)
+	return puo
+}
+
+// SetHash sets the "hash" field.
+func (puo *PasswordUpdateOne) SetHash(b []byte) *PasswordUpdateOne {
+	puo.mutation.SetHash(b)
+	return puo
+}
+
+// SetUsername sets the "username" field.
+func (puo *PasswordUpdateOne) SetUsername(s string) *PasswordUpdateOne {
+	puo.mutation.SetUsername(s)
+	return puo
+}
+
+// SetUserID sets the "user_id" field.
+func (puo *PasswordUpdateOne) SetUserID(s string) *PasswordUpdateOne {
+	puo.mutation.SetUserID(s)
+	return puo
+}
+
+// Mutation returns the PasswordMutation object of the builder.
+func (puo *PasswordUpdateOne) Mutation() *PasswordMutation {
+	return puo.mutation
+}
+
+// Where appends a list of predicates to the PasswordUpdate builder.
+func (puo *PasswordUpdateOne) Where(ps ...predicate.Password) *PasswordUpdateOne {
+	puo.mutation.Where(ps...)
+	return puo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (puo *PasswordUpdateOne) Select(field string, fields ...string) *PasswordUpdateOne {
+	puo.fields = append([]string{field}, fields...)
+	return puo
+}
+
+// Save executes the query and returns the updated Password entity.
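+//
+// Example (a sketch; assumes a previously fetched pw *Password and the
+// generated pw.Update() helper on the entity):
+//
+//	pw, err := pw.Update().
+//		SetUsername("jane").
+//		Save(ctx)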
+func (puo *PasswordUpdateOne) Save(ctx context.Context) (*Password, error) { + return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (puo *PasswordUpdateOne) SaveX(ctx context.Context) *Password { + node, err := puo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (puo *PasswordUpdateOne) Exec(ctx context.Context) error { + _, err := puo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (puo *PasswordUpdateOne) ExecX(ctx context.Context) { + if err := puo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (puo *PasswordUpdateOne) check() error { + if v, ok := puo.mutation.Email(); ok { + if err := password.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`db: validator failed for field "Password.email": %w`, err)} + } + } + if v, ok := puo.mutation.Username(); ok { + if err := password.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`db: validator failed for field "Password.username": %w`, err)} + } + } + if v, ok := puo.mutation.UserID(); ok { + if err := password.UserIDValidator(v); err != nil { + return &ValidationError{Name: "user_id", err: fmt.Errorf(`db: validator failed for field "Password.user_id": %w`, err)} + } + } + return nil +} + +func (puo *PasswordUpdateOne) sqlSave(ctx context.Context) (_node *Password, err error) { + if err := puo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(password.Table, password.Columns, sqlgraph.NewFieldSpec(password.FieldID, field.TypeInt)) + id, ok := puo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "Password.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := puo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, password.FieldID) + for _, f := range fields { + if !password.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != password.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := puo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := puo.mutation.Email(); ok { + _spec.SetField(password.FieldEmail, field.TypeString, value) + } + if value, ok := puo.mutation.Hash(); ok { + _spec.SetField(password.FieldHash, field.TypeBytes, value) + } + if value, ok := puo.mutation.Username(); ok { + _spec.SetField(password.FieldUsername, field.TypeString, value) + } + if value, ok := puo.mutation.UserID(); ok { + _spec.SetField(password.FieldUserID, field.TypeString, value) + } + _node = &Password{config: puo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{password.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + puo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/predicate/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/predicate/BUILD 
new file mode 100644 index 00000000..0bc0d861 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/predicate/BUILD @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "predicate", + srcs = ["predicate.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/predicate", + importpath = "github.com/dexidp/dex/storage/ent/db/predicate", + visibility = ["//visibility:public"], + deps = ["//vendor/entgo.io/ent/dialect/sql"], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/predicate/predicate.go b/vendor/github.com/dexidp/dex/storage/ent/db/predicate/predicate.go new file mode 100644 index 00000000..ed07a071 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/predicate/predicate.go @@ -0,0 +1,37 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// AuthCode is the predicate function for authcode builders. +type AuthCode func(*sql.Selector) + +// AuthRequest is the predicate function for authrequest builders. +type AuthRequest func(*sql.Selector) + +// Connector is the predicate function for connector builders. +type Connector func(*sql.Selector) + +// DeviceRequest is the predicate function for devicerequest builders. +type DeviceRequest func(*sql.Selector) + +// DeviceToken is the predicate function for devicetoken builders. +type DeviceToken func(*sql.Selector) + +// Keys is the predicate function for keys builders. +type Keys func(*sql.Selector) + +// OAuth2Client is the predicate function for oauth2client builders. +type OAuth2Client func(*sql.Selector) + +// OfflineSession is the predicate function for offlinesession builders. +type OfflineSession func(*sql.Selector) + +// Password is the predicate function for password builders. +type Password func(*sql.Selector) + +// RefreshToken is the predicate function for refreshtoken builders. +type RefreshToken func(*sql.Selector) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken.go new file mode 100644 index 00000000..f116d684 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken.go @@ -0,0 +1,269 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/refreshtoken" +) + +// RefreshToken is the model entity for the RefreshToken schema. +type RefreshToken struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // ClientID holds the value of the "client_id" field. + ClientID string `json:"client_id,omitempty"` + // Scopes holds the value of the "scopes" field. + Scopes []string `json:"scopes,omitempty"` + // Nonce holds the value of the "nonce" field. + Nonce string `json:"nonce,omitempty"` + // ClaimsUserID holds the value of the "claims_user_id" field. + ClaimsUserID string `json:"claims_user_id,omitempty"` + // ClaimsUsername holds the value of the "claims_username" field. + ClaimsUsername string `json:"claims_username,omitempty"` + // ClaimsEmail holds the value of the "claims_email" field. + ClaimsEmail string `json:"claims_email,omitempty"` + // ClaimsEmailVerified holds the value of the "claims_email_verified" field. + ClaimsEmailVerified bool `json:"claims_email_verified,omitempty"` + // ClaimsGroups holds the value of the "claims_groups" field. 
+ ClaimsGroups []string `json:"claims_groups,omitempty"` + // ClaimsPreferredUsername holds the value of the "claims_preferred_username" field. + ClaimsPreferredUsername string `json:"claims_preferred_username,omitempty"` + // ConnectorID holds the value of the "connector_id" field. + ConnectorID string `json:"connector_id,omitempty"` + // ConnectorData holds the value of the "connector_data" field. + ConnectorData *[]byte `json:"connector_data,omitempty"` + // Token holds the value of the "token" field. + Token string `json:"token,omitempty"` + // ObsoleteToken holds the value of the "obsolete_token" field. + ObsoleteToken string `json:"obsolete_token,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // LastUsed holds the value of the "last_used" field. + LastUsed time.Time `json:"last_used,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*RefreshToken) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case refreshtoken.FieldScopes, refreshtoken.FieldClaimsGroups, refreshtoken.FieldConnectorData: + values[i] = new([]byte) + case refreshtoken.FieldClaimsEmailVerified: + values[i] = new(sql.NullBool) + case refreshtoken.FieldID, refreshtoken.FieldClientID, refreshtoken.FieldNonce, refreshtoken.FieldClaimsUserID, refreshtoken.FieldClaimsUsername, refreshtoken.FieldClaimsEmail, refreshtoken.FieldClaimsPreferredUsername, refreshtoken.FieldConnectorID, refreshtoken.FieldToken, refreshtoken.FieldObsoleteToken: + values[i] = new(sql.NullString) + case refreshtoken.FieldCreatedAt, refreshtoken.FieldLastUsed: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the RefreshToken fields. 
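+// JSON-encoded columns (scopes, claims_groups) arrive as *[]byte and are
+// unmarshaled into their Go slice fields.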
+func (rt *RefreshToken) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case refreshtoken.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + rt.ID = value.String + } + case refreshtoken.FieldClientID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field client_id", values[i]) + } else if value.Valid { + rt.ClientID = value.String + } + case refreshtoken.FieldScopes: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field scopes", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &rt.Scopes); err != nil { + return fmt.Errorf("unmarshal field scopes: %w", err) + } + } + case refreshtoken.FieldNonce: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field nonce", values[i]) + } else if value.Valid { + rt.Nonce = value.String + } + case refreshtoken.FieldClaimsUserID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_user_id", values[i]) + } else if value.Valid { + rt.ClaimsUserID = value.String + } + case refreshtoken.FieldClaimsUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_username", values[i]) + } else if value.Valid { + rt.ClaimsUsername = value.String + } + case refreshtoken.FieldClaimsEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_email", values[i]) + } else if value.Valid { + rt.ClaimsEmail = value.String + } + case refreshtoken.FieldClaimsEmailVerified: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field claims_email_verified", values[i]) + } else if value.Valid { + rt.ClaimsEmailVerified = value.Bool + } + case refreshtoken.FieldClaimsGroups: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field claims_groups", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &rt.ClaimsGroups); err != nil { + return fmt.Errorf("unmarshal field claims_groups: %w", err) + } + } + case refreshtoken.FieldClaimsPreferredUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field claims_preferred_username", values[i]) + } else if value.Valid { + rt.ClaimsPreferredUsername = value.String + } + case refreshtoken.FieldConnectorID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field connector_id", values[i]) + } else if value.Valid { + rt.ConnectorID = value.String + } + case refreshtoken.FieldConnectorData: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field connector_data", values[i]) + } else if value != nil { + rt.ConnectorData = value + } + case refreshtoken.FieldToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value.Valid { + rt.Token = value.String + } + case refreshtoken.FieldObsoleteToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for 
field obsolete_token", values[i]) + } else if value.Valid { + rt.ObsoleteToken = value.String + } + case refreshtoken.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + rt.CreatedAt = value.Time + } + case refreshtoken.FieldLastUsed: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_used", values[i]) + } else if value.Valid { + rt.LastUsed = value.Time + } + default: + rt.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the RefreshToken. +// This includes values selected through modifiers, order, etc. +func (rt *RefreshToken) Value(name string) (ent.Value, error) { + return rt.selectValues.Get(name) +} + +// Update returns a builder for updating this RefreshToken. +// Note that you need to call RefreshToken.Unwrap() before calling this method if this RefreshToken +// was returned from a transaction, and the transaction was committed or rolled back. +func (rt *RefreshToken) Update() *RefreshTokenUpdateOne { + return NewRefreshTokenClient(rt.config).UpdateOne(rt) +} + +// Unwrap unwraps the RefreshToken entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (rt *RefreshToken) Unwrap() *RefreshToken { + _tx, ok := rt.config.driver.(*txDriver) + if !ok { + panic("db: RefreshToken is not a transactional entity") + } + rt.config.driver = _tx.drv + return rt +} + +// String implements the fmt.Stringer. +func (rt *RefreshToken) String() string { + var builder strings.Builder + builder.WriteString("RefreshToken(") + builder.WriteString(fmt.Sprintf("id=%v, ", rt.ID)) + builder.WriteString("client_id=") + builder.WriteString(rt.ClientID) + builder.WriteString(", ") + builder.WriteString("scopes=") + builder.WriteString(fmt.Sprintf("%v", rt.Scopes)) + builder.WriteString(", ") + builder.WriteString("nonce=") + builder.WriteString(rt.Nonce) + builder.WriteString(", ") + builder.WriteString("claims_user_id=") + builder.WriteString(rt.ClaimsUserID) + builder.WriteString(", ") + builder.WriteString("claims_username=") + builder.WriteString(rt.ClaimsUsername) + builder.WriteString(", ") + builder.WriteString("claims_email=") + builder.WriteString(rt.ClaimsEmail) + builder.WriteString(", ") + builder.WriteString("claims_email_verified=") + builder.WriteString(fmt.Sprintf("%v", rt.ClaimsEmailVerified)) + builder.WriteString(", ") + builder.WriteString("claims_groups=") + builder.WriteString(fmt.Sprintf("%v", rt.ClaimsGroups)) + builder.WriteString(", ") + builder.WriteString("claims_preferred_username=") + builder.WriteString(rt.ClaimsPreferredUsername) + builder.WriteString(", ") + builder.WriteString("connector_id=") + builder.WriteString(rt.ConnectorID) + builder.WriteString(", ") + if v := rt.ConnectorData; v != nil { + builder.WriteString("connector_data=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteString(rt.Token) + builder.WriteString(", ") + builder.WriteString("obsolete_token=") + builder.WriteString(rt.ObsoleteToken) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(rt.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("last_used=") + 
builder.WriteString(rt.LastUsed.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// RefreshTokens is a parsable slice of RefreshToken. +type RefreshTokens []*RefreshToken diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/BUILD b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/BUILD new file mode 100644 index 00000000..604074f8 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "refreshtoken", + srcs = [ + "refreshtoken.go", + "where.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken", + importpath = "github.com/dexidp/dex/storage/ent/db/refreshtoken", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent/dialect/sql", + "//vendor/github.com/dexidp/dex/storage/ent/db/predicate", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/refreshtoken.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/refreshtoken.go new file mode 100644 index 00000000..1163113d --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/refreshtoken.go @@ -0,0 +1,173 @@ +// Code generated by ent, DO NOT EDIT. + +package refreshtoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the refreshtoken type in the database. + Label = "refresh_token" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldClientID holds the string denoting the client_id field in the database. + FieldClientID = "client_id" + // FieldScopes holds the string denoting the scopes field in the database. + FieldScopes = "scopes" + // FieldNonce holds the string denoting the nonce field in the database. + FieldNonce = "nonce" + // FieldClaimsUserID holds the string denoting the claims_user_id field in the database. + FieldClaimsUserID = "claims_user_id" + // FieldClaimsUsername holds the string denoting the claims_username field in the database. + FieldClaimsUsername = "claims_username" + // FieldClaimsEmail holds the string denoting the claims_email field in the database. + FieldClaimsEmail = "claims_email" + // FieldClaimsEmailVerified holds the string denoting the claims_email_verified field in the database. + FieldClaimsEmailVerified = "claims_email_verified" + // FieldClaimsGroups holds the string denoting the claims_groups field in the database. + FieldClaimsGroups = "claims_groups" + // FieldClaimsPreferredUsername holds the string denoting the claims_preferred_username field in the database. + FieldClaimsPreferredUsername = "claims_preferred_username" + // FieldConnectorID holds the string denoting the connector_id field in the database. + FieldConnectorID = "connector_id" + // FieldConnectorData holds the string denoting the connector_data field in the database. + FieldConnectorData = "connector_data" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // FieldObsoleteToken holds the string denoting the obsolete_token field in the database. + FieldObsoleteToken = "obsolete_token" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldLastUsed holds the string denoting the last_used field in the database. + FieldLastUsed = "last_used" + // Table holds the table name of the refreshtoken in the database. 
+ Table = "refresh_tokens" +) + +// Columns holds all SQL columns for refreshtoken fields. +var Columns = []string{ + FieldID, + FieldClientID, + FieldScopes, + FieldNonce, + FieldClaimsUserID, + FieldClaimsUsername, + FieldClaimsEmail, + FieldClaimsEmailVerified, + FieldClaimsGroups, + FieldClaimsPreferredUsername, + FieldConnectorID, + FieldConnectorData, + FieldToken, + FieldObsoleteToken, + FieldCreatedAt, + FieldLastUsed, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save. + ClientIDValidator func(string) error + // NonceValidator is a validator for the "nonce" field. It is called by the builders before save. + NonceValidator func(string) error + // ClaimsUserIDValidator is a validator for the "claims_user_id" field. It is called by the builders before save. + ClaimsUserIDValidator func(string) error + // ClaimsUsernameValidator is a validator for the "claims_username" field. It is called by the builders before save. + ClaimsUsernameValidator func(string) error + // ClaimsEmailValidator is a validator for the "claims_email" field. It is called by the builders before save. + ClaimsEmailValidator func(string) error + // DefaultClaimsPreferredUsername holds the default value on creation for the "claims_preferred_username" field. + DefaultClaimsPreferredUsername string + // ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + ConnectorIDValidator func(string) error + // DefaultToken holds the default value on creation for the "token" field. + DefaultToken string + // DefaultObsoleteToken holds the default value on creation for the "obsolete_token" field. + DefaultObsoleteToken string + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultLastUsed holds the default value on creation for the "last_used" field. + DefaultLastUsed func() time.Time + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the RefreshToken queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByClientID orders the results by the client_id field. +func ByClientID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClientID, opts...).ToFunc() +} + +// ByNonce orders the results by the nonce field. +func ByNonce(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNonce, opts...).ToFunc() +} + +// ByClaimsUserID orders the results by the claims_user_id field. +func ByClaimsUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUserID, opts...).ToFunc() +} + +// ByClaimsUsername orders the results by the claims_username field. +func ByClaimsUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsUsername, opts...).ToFunc() +} + +// ByClaimsEmail orders the results by the claims_email field. 
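+//
+// Example (a sketch; client is an assumed *Client value, and sql.OrderDesc
+// is the order-term option from entgo.io/ent/dialect/sql):
+//
+//	client.RefreshToken.Query().
+//		Order(refreshtoken.ByClaimsEmail(sql.OrderDesc())).
+//		All(ctx)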
+func ByClaimsEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmail, opts...).ToFunc() +} + +// ByClaimsEmailVerified orders the results by the claims_email_verified field. +func ByClaimsEmailVerified(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsEmailVerified, opts...).ToFunc() +} + +// ByClaimsPreferredUsername orders the results by the claims_preferred_username field. +func ByClaimsPreferredUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaimsPreferredUsername, opts...).ToFunc() +} + +// ByConnectorID orders the results by the connector_id field. +func ByConnectorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConnectorID, opts...).ToFunc() +} + +// ByToken orders the results by the token field. +func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} + +// ByObsoleteToken orders the results by the obsolete_token field. +func ByObsoleteToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldObsoleteToken, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByLastUsed orders the results by the last_used field. +func ByLastUsed(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastUsed, opts...).ToFunc() +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/where.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/where.go new file mode 100644 index 00000000..8c50a71a --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken/where.go @@ -0,0 +1,907 @@ +// Code generated by ent, DO NOT EDIT. + +package refreshtoken + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/dexidp/dex/storage/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. 
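+//
+// Example (a sketch; client is an assumed *Client value):
+//
+//	client.RefreshToken.Query().
+//		Where(refreshtoken.IDEqualFold("TOKEN-ID")).
+//		Only(ctx)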
+func IDEqualFold(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldID, id)) +} + +// ClientID applies equality check predicate on the "client_id" field. It's identical to ClientIDEQ. +func ClientID(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClientID, v)) +} + +// Nonce applies equality check predicate on the "nonce" field. It's identical to NonceEQ. +func Nonce(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldNonce, v)) +} + +// ClaimsUserID applies equality check predicate on the "claims_user_id" field. It's identical to ClaimsUserIDEQ. +func ClaimsUserID(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUsername applies equality check predicate on the "claims_username" field. It's identical to ClaimsUsernameEQ. +func ClaimsUsername(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsEmail applies equality check predicate on the "claims_email" field. It's identical to ClaimsEmailEQ. +func ClaimsEmail(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerified applies equality check predicate on the "claims_email_verified" field. It's identical to ClaimsEmailVerifiedEQ. +func ClaimsEmailVerified(v bool) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsPreferredUsername applies equality check predicate on the "claims_preferred_username" field. It's identical to ClaimsPreferredUsernameEQ. +func ClaimsPreferredUsername(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ConnectorID applies equality check predicate on the "connector_id" field. It's identical to ConnectorIDEQ. +func ConnectorID(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorData applies equality check predicate on the "connector_data" field. It's identical to ConnectorDataEQ. +func ConnectorData(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldConnectorData, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldToken, v)) +} + +// ObsoleteToken applies equality check predicate on the "obsolete_token" field. It's identical to ObsoleteTokenEQ. +func ObsoleteToken(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldObsoleteToken, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldCreatedAt, v)) +} + +// LastUsed applies equality check predicate on the "last_used" field. It's identical to LastUsedEQ. +func LastUsed(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldLastUsed, v)) +} + +// ClientIDEQ applies the EQ predicate on the "client_id" field. 
+func ClientIDEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClientID, v)) +} + +// ClientIDNEQ applies the NEQ predicate on the "client_id" field. +func ClientIDNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClientID, v)) +} + +// ClientIDIn applies the In predicate on the "client_id" field. +func ClientIDIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldClientID, vs...)) +} + +// ClientIDNotIn applies the NotIn predicate on the "client_id" field. +func ClientIDNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldClientID, vs...)) +} + +// ClientIDGT applies the GT predicate on the "client_id" field. +func ClientIDGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldClientID, v)) +} + +// ClientIDGTE applies the GTE predicate on the "client_id" field. +func ClientIDGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldClientID, v)) +} + +// ClientIDLT applies the LT predicate on the "client_id" field. +func ClientIDLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldClientID, v)) +} + +// ClientIDLTE applies the LTE predicate on the "client_id" field. +func ClientIDLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldClientID, v)) +} + +// ClientIDContains applies the Contains predicate on the "client_id" field. +func ClientIDContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldClientID, v)) +} + +// ClientIDHasPrefix applies the HasPrefix predicate on the "client_id" field. +func ClientIDHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldClientID, v)) +} + +// ClientIDHasSuffix applies the HasSuffix predicate on the "client_id" field. +func ClientIDHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldClientID, v)) +} + +// ClientIDEqualFold applies the EqualFold predicate on the "client_id" field. +func ClientIDEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldClientID, v)) +} + +// ClientIDContainsFold applies the ContainsFold predicate on the "client_id" field. +func ClientIDContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldClientID, v)) +} + +// ScopesIsNil applies the IsNil predicate on the "scopes" field. +func ScopesIsNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIsNull(FieldScopes)) +} + +// ScopesNotNil applies the NotNil predicate on the "scopes" field. +func ScopesNotNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotNull(FieldScopes)) +} + +// NonceEQ applies the EQ predicate on the "nonce" field. +func NonceEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldNonce, v)) +} + +// NonceNEQ applies the NEQ predicate on the "nonce" field. +func NonceNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldNonce, v)) +} + +// NonceIn applies the In predicate on the "nonce" field. +func NonceIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldNonce, vs...)) +} + +// NonceNotIn applies the NotIn predicate on the "nonce" field. 
+func NonceNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldNonce, vs...)) +} + +// NonceGT applies the GT predicate on the "nonce" field. +func NonceGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldNonce, v)) +} + +// NonceGTE applies the GTE predicate on the "nonce" field. +func NonceGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldNonce, v)) +} + +// NonceLT applies the LT predicate on the "nonce" field. +func NonceLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldNonce, v)) +} + +// NonceLTE applies the LTE predicate on the "nonce" field. +func NonceLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldNonce, v)) +} + +// NonceContains applies the Contains predicate on the "nonce" field. +func NonceContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldNonce, v)) +} + +// NonceHasPrefix applies the HasPrefix predicate on the "nonce" field. +func NonceHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldNonce, v)) +} + +// NonceHasSuffix applies the HasSuffix predicate on the "nonce" field. +func NonceHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldNonce, v)) +} + +// NonceEqualFold applies the EqualFold predicate on the "nonce" field. +func NonceEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldNonce, v)) +} + +// NonceContainsFold applies the ContainsFold predicate on the "nonce" field. +func NonceContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldNonce, v)) +} + +// ClaimsUserIDEQ applies the EQ predicate on the "claims_user_id" field. +func ClaimsUserIDEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDNEQ applies the NEQ predicate on the "claims_user_id" field. +func ClaimsUserIDNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClaimsUserID, v)) +} + +// ClaimsUserIDIn applies the In predicate on the "claims_user_id" field. +func ClaimsUserIDIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDNotIn applies the NotIn predicate on the "claims_user_id" field. +func ClaimsUserIDNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldClaimsUserID, vs...)) +} + +// ClaimsUserIDGT applies the GT predicate on the "claims_user_id" field. +func ClaimsUserIDGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDGTE applies the GTE predicate on the "claims_user_id" field. +func ClaimsUserIDGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLT applies the LT predicate on the "claims_user_id" field. +func ClaimsUserIDLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldClaimsUserID, v)) +} + +// ClaimsUserIDLTE applies the LTE predicate on the "claims_user_id" field. 
+func ClaimsUserIDLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContains applies the Contains predicate on the "claims_user_id" field. +func ClaimsUserIDContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasPrefix applies the HasPrefix predicate on the "claims_user_id" field. +func ClaimsUserIDHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDHasSuffix applies the HasSuffix predicate on the "claims_user_id" field. +func ClaimsUserIDHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldClaimsUserID, v)) +} + +// ClaimsUserIDEqualFold applies the EqualFold predicate on the "claims_user_id" field. +func ClaimsUserIDEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldClaimsUserID, v)) +} + +// ClaimsUserIDContainsFold applies the ContainsFold predicate on the "claims_user_id" field. +func ClaimsUserIDContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldClaimsUserID, v)) +} + +// ClaimsUsernameEQ applies the EQ predicate on the "claims_username" field. +func ClaimsUsernameEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameNEQ applies the NEQ predicate on the "claims_username" field. +func ClaimsUsernameNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClaimsUsername, v)) +} + +// ClaimsUsernameIn applies the In predicate on the "claims_username" field. +func ClaimsUsernameIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameNotIn applies the NotIn predicate on the "claims_username" field. +func ClaimsUsernameNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldClaimsUsername, vs...)) +} + +// ClaimsUsernameGT applies the GT predicate on the "claims_username" field. +func ClaimsUsernameGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameGTE applies the GTE predicate on the "claims_username" field. +func ClaimsUsernameGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLT applies the LT predicate on the "claims_username" field. +func ClaimsUsernameLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldClaimsUsername, v)) +} + +// ClaimsUsernameLTE applies the LTE predicate on the "claims_username" field. +func ClaimsUsernameLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContains applies the Contains predicate on the "claims_username" field. +func ClaimsUsernameContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasPrefix applies the HasPrefix predicate on the "claims_username" field. 
+func ClaimsUsernameHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameHasSuffix applies the HasSuffix predicate on the "claims_username" field. +func ClaimsUsernameHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldClaimsUsername, v)) +} + +// ClaimsUsernameEqualFold applies the EqualFold predicate on the "claims_username" field. +func ClaimsUsernameEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldClaimsUsername, v)) +} + +// ClaimsUsernameContainsFold applies the ContainsFold predicate on the "claims_username" field. +func ClaimsUsernameContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldClaimsUsername, v)) +} + +// ClaimsEmailEQ applies the EQ predicate on the "claims_email" field. +func ClaimsEmailEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailNEQ applies the NEQ predicate on the "claims_email" field. +func ClaimsEmailNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClaimsEmail, v)) +} + +// ClaimsEmailIn applies the In predicate on the "claims_email" field. +func ClaimsEmailIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailNotIn applies the NotIn predicate on the "claims_email" field. +func ClaimsEmailNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldClaimsEmail, vs...)) +} + +// ClaimsEmailGT applies the GT predicate on the "claims_email" field. +func ClaimsEmailGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldClaimsEmail, v)) +} + +// ClaimsEmailGTE applies the GTE predicate on the "claims_email" field. +func ClaimsEmailGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailLT applies the LT predicate on the "claims_email" field. +func ClaimsEmailLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldClaimsEmail, v)) +} + +// ClaimsEmailLTE applies the LTE predicate on the "claims_email" field. +func ClaimsEmailLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldClaimsEmail, v)) +} + +// ClaimsEmailContains applies the Contains predicate on the "claims_email" field. +func ClaimsEmailContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasPrefix applies the HasPrefix predicate on the "claims_email" field. +func ClaimsEmailHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldClaimsEmail, v)) +} + +// ClaimsEmailHasSuffix applies the HasSuffix predicate on the "claims_email" field. +func ClaimsEmailHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldClaimsEmail, v)) +} + +// ClaimsEmailEqualFold applies the EqualFold predicate on the "claims_email" field. +func ClaimsEmailEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailContainsFold applies the ContainsFold predicate on the "claims_email" field. 
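// Sketch (aside): the EqualFold/ContainsFold variants match case-insensitively,
// which suits e-mail lookups. Hypothetical address; "client" and "ctx" as above:
//
//	rt, err := client.RefreshToken.Query().
//		Where(refreshtoken.ClaimsEmailEqualFold("Alice@Example.COM")).
//		Only(ctx)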
+func ClaimsEmailContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldClaimsEmail, v)) +} + +// ClaimsEmailVerifiedEQ applies the EQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedEQ(v bool) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsEmailVerifiedNEQ applies the NEQ predicate on the "claims_email_verified" field. +func ClaimsEmailVerifiedNEQ(v bool) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClaimsEmailVerified, v)) +} + +// ClaimsGroupsIsNil applies the IsNil predicate on the "claims_groups" field. +func ClaimsGroupsIsNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIsNull(FieldClaimsGroups)) +} + +// ClaimsGroupsNotNil applies the NotNil predicate on the "claims_groups" field. +func ClaimsGroupsNotNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotNull(FieldClaimsGroups)) +} + +// ClaimsPreferredUsernameEQ applies the EQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameNEQ applies the NEQ predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameIn applies the In predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameNotIn applies the NotIn predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldClaimsPreferredUsername, vs...)) +} + +// ClaimsPreferredUsernameGT applies the GT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameGTE applies the GTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLT applies the LT predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameLTE applies the LTE predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContains applies the Contains predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasPrefix applies the HasPrefix predicate on the "claims_preferred_username" field. 
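// Sketch (aside): claims_groups is a nullable JSON column, so only the
// IsNil/NotNil checks are generated for it here; element-level matching would
// go through the sqljson helpers instead. For example, counting verified
// users that carry group claims:
//
//	n, err := client.RefreshToken.Query().
//		Where(refreshtoken.ClaimsGroupsNotNil(), refreshtoken.ClaimsEmailVerifiedEQ(true)).
//		Count(ctx)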
+func ClaimsPreferredUsernameHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameHasSuffix applies the HasSuffix predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameEqualFold applies the EqualFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldClaimsPreferredUsername, v)) +} + +// ClaimsPreferredUsernameContainsFold applies the ContainsFold predicate on the "claims_preferred_username" field. +func ClaimsPreferredUsernameContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldClaimsPreferredUsername, v)) +} + +// ConnectorIDEQ applies the EQ predicate on the "connector_id" field. +func ConnectorIDEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldConnectorID, v)) +} + +// ConnectorIDNEQ applies the NEQ predicate on the "connector_id" field. +func ConnectorIDNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldConnectorID, v)) +} + +// ConnectorIDIn applies the In predicate on the "connector_id" field. +func ConnectorIDIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldConnectorID, vs...)) +} + +// ConnectorIDNotIn applies the NotIn predicate on the "connector_id" field. +func ConnectorIDNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldConnectorID, vs...)) +} + +// ConnectorIDGT applies the GT predicate on the "connector_id" field. +func ConnectorIDGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldConnectorID, v)) +} + +// ConnectorIDGTE applies the GTE predicate on the "connector_id" field. +func ConnectorIDGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldConnectorID, v)) +} + +// ConnectorIDLT applies the LT predicate on the "connector_id" field. +func ConnectorIDLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldConnectorID, v)) +} + +// ConnectorIDLTE applies the LTE predicate on the "connector_id" field. +func ConnectorIDLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldConnectorID, v)) +} + +// ConnectorIDContains applies the Contains predicate on the "connector_id" field. +func ConnectorIDContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldConnectorID, v)) +} + +// ConnectorIDHasPrefix applies the HasPrefix predicate on the "connector_id" field. +func ConnectorIDHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldConnectorID, v)) +} + +// ConnectorIDHasSuffix applies the HasSuffix predicate on the "connector_id" field. +func ConnectorIDHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldConnectorID, v)) +} + +// ConnectorIDEqualFold applies the EqualFold predicate on the "connector_id" field. 
+func ConnectorIDEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldConnectorID, v)) +} + +// ConnectorIDContainsFold applies the ContainsFold predicate on the "connector_id" field. +func ConnectorIDContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldConnectorID, v)) +} + +// ConnectorDataEQ applies the EQ predicate on the "connector_data" field. +func ConnectorDataEQ(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldConnectorData, v)) +} + +// ConnectorDataNEQ applies the NEQ predicate on the "connector_data" field. +func ConnectorDataNEQ(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldConnectorData, v)) +} + +// ConnectorDataIn applies the In predicate on the "connector_data" field. +func ConnectorDataIn(vs ...[]byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldConnectorData, vs...)) +} + +// ConnectorDataNotIn applies the NotIn predicate on the "connector_data" field. +func ConnectorDataNotIn(vs ...[]byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldConnectorData, vs...)) +} + +// ConnectorDataGT applies the GT predicate on the "connector_data" field. +func ConnectorDataGT(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldConnectorData, v)) +} + +// ConnectorDataGTE applies the GTE predicate on the "connector_data" field. +func ConnectorDataGTE(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldConnectorData, v)) +} + +// ConnectorDataLT applies the LT predicate on the "connector_data" field. +func ConnectorDataLT(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldConnectorData, v)) +} + +// ConnectorDataLTE applies the LTE predicate on the "connector_data" field. +func ConnectorDataLTE(v []byte) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldConnectorData, v)) +} + +// ConnectorDataIsNil applies the IsNil predicate on the "connector_data" field. +func ConnectorDataIsNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIsNull(FieldConnectorData)) +} + +// ConnectorDataNotNil applies the NotNil predicate on the "connector_data" field. +func ConnectorDataNotNil() predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotNull(FieldConnectorData)) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldToken, v)) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. +func TokenNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldToken, v)) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldToken, vs...)) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldToken, vs...)) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldToken, v)) +} + +// TokenGTE applies the GTE predicate on the "token" field. 
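// Sketch (aside): the []byte predicates compare raw connector_data values,
// while IsNil/NotNil test the nullable column itself. Hypothetical connector
// name:
//
//	withState, err := client.RefreshToken.Query().
//		Where(refreshtoken.ConnectorIDEQ("ldap"), refreshtoken.ConnectorDataNotNil()).
//		All(ctx)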
+func TokenGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldToken, v)) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldToken, v)) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldToken, v)) +} + +// TokenContains applies the Contains predicate on the "token" field. +func TokenContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldToken, v)) +} + +// TokenHasPrefix applies the HasPrefix predicate on the "token" field. +func TokenHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldToken, v)) +} + +// TokenHasSuffix applies the HasSuffix predicate on the "token" field. +func TokenHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldToken, v)) +} + +// TokenEqualFold applies the EqualFold predicate on the "token" field. +func TokenEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldToken, v)) +} + +// TokenContainsFold applies the ContainsFold predicate on the "token" field. +func TokenContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldToken, v)) +} + +// ObsoleteTokenEQ applies the EQ predicate on the "obsolete_token" field. +func ObsoleteTokenEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldObsoleteToken, v)) +} + +// ObsoleteTokenNEQ applies the NEQ predicate on the "obsolete_token" field. +func ObsoleteTokenNEQ(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldObsoleteToken, v)) +} + +// ObsoleteTokenIn applies the In predicate on the "obsolete_token" field. +func ObsoleteTokenIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldObsoleteToken, vs...)) +} + +// ObsoleteTokenNotIn applies the NotIn predicate on the "obsolete_token" field. +func ObsoleteTokenNotIn(vs ...string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldObsoleteToken, vs...)) +} + +// ObsoleteTokenGT applies the GT predicate on the "obsolete_token" field. +func ObsoleteTokenGT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldObsoleteToken, v)) +} + +// ObsoleteTokenGTE applies the GTE predicate on the "obsolete_token" field. +func ObsoleteTokenGTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldObsoleteToken, v)) +} + +// ObsoleteTokenLT applies the LT predicate on the "obsolete_token" field. +func ObsoleteTokenLT(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldObsoleteToken, v)) +} + +// ObsoleteTokenLTE applies the LTE predicate on the "obsolete_token" field. +func ObsoleteTokenLTE(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldObsoleteToken, v)) +} + +// ObsoleteTokenContains applies the Contains predicate on the "obsolete_token" field. +func ObsoleteTokenContains(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContains(FieldObsoleteToken, v)) +} + +// ObsoleteTokenHasPrefix applies the HasPrefix predicate on the "obsolete_token" field. 
+func ObsoleteTokenHasPrefix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasPrefix(FieldObsoleteToken, v)) +} + +// ObsoleteTokenHasSuffix applies the HasSuffix predicate on the "obsolete_token" field. +func ObsoleteTokenHasSuffix(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldHasSuffix(FieldObsoleteToken, v)) +} + +// ObsoleteTokenEqualFold applies the EqualFold predicate on the "obsolete_token" field. +func ObsoleteTokenEqualFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEqualFold(FieldObsoleteToken, v)) +} + +// ObsoleteTokenContainsFold applies the ContainsFold predicate on the "obsolete_token" field. +func ObsoleteTokenContainsFold(v string) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldContainsFold(FieldObsoleteToken, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldCreatedAt, v)) +} + +// LastUsedEQ applies the EQ predicate on the "last_used" field. +func LastUsedEQ(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldEQ(FieldLastUsed, v)) +} + +// LastUsedNEQ applies the NEQ predicate on the "last_used" field. +func LastUsedNEQ(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNEQ(FieldLastUsed, v)) +} + +// LastUsedIn applies the In predicate on the "last_used" field. +func LastUsedIn(vs ...time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldIn(FieldLastUsed, vs...)) +} + +// LastUsedNotIn applies the NotIn predicate on the "last_used" field. +func LastUsedNotIn(vs ...time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldNotIn(FieldLastUsed, vs...)) +} + +// LastUsedGT applies the GT predicate on the "last_used" field. +func LastUsedGT(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGT(FieldLastUsed, v)) +} + +// LastUsedGTE applies the GTE predicate on the "last_used" field. 
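// Sketch (aside): the time.Time predicates make age-based cleanup queries
// plain comparisons. Hypothetical 30-day cutoff:
//
//	cutoff := time.Now().AddDate(0, 0, -30)
//	old, err := client.RefreshToken.Query().
//		Where(refreshtoken.CreatedAtLT(cutoff)).
//		All(ctx)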
+func LastUsedGTE(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldGTE(FieldLastUsed, v)) +} + +// LastUsedLT applies the LT predicate on the "last_used" field. +func LastUsedLT(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLT(FieldLastUsed, v)) +} + +// LastUsedLTE applies the LTE predicate on the "last_used" field. +func LastUsedLTE(v time.Time) predicate.RefreshToken { + return predicate.RefreshToken(sql.FieldLTE(FieldLastUsed, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.RefreshToken) predicate.RefreshToken { + return predicate.RefreshToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for _, p := range predicates { + p(s1) + } + s.Where(s1.P()) + }) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.RefreshToken) predicate.RefreshToken { + return predicate.RefreshToken(func(s *sql.Selector) { + s1 := s.Clone().SetP(nil) + for i, p := range predicates { + if i > 0 { + s1.Or() + } + p(s1) + } + s.Where(s1.P()) + }) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.RefreshToken) predicate.RefreshToken { + return predicate.RefreshToken(func(s *sql.Selector) { + p(s.Not()) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_create.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_create.go new file mode 100644 index 00000000..8ad56361 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_create.go @@ -0,0 +1,465 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/refreshtoken" +) + +// RefreshTokenCreate is the builder for creating a RefreshToken entity. +type RefreshTokenCreate struct { + config + mutation *RefreshTokenMutation + hooks []Hook +} + +// SetClientID sets the "client_id" field. +func (rtc *RefreshTokenCreate) SetClientID(s string) *RefreshTokenCreate { + rtc.mutation.SetClientID(s) + return rtc +} + +// SetScopes sets the "scopes" field. +func (rtc *RefreshTokenCreate) SetScopes(s []string) *RefreshTokenCreate { + rtc.mutation.SetScopes(s) + return rtc +} + +// SetNonce sets the "nonce" field. +func (rtc *RefreshTokenCreate) SetNonce(s string) *RefreshTokenCreate { + rtc.mutation.SetNonce(s) + return rtc +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (rtc *RefreshTokenCreate) SetClaimsUserID(s string) *RefreshTokenCreate { + rtc.mutation.SetClaimsUserID(s) + return rtc +} + +// SetClaimsUsername sets the "claims_username" field. +func (rtc *RefreshTokenCreate) SetClaimsUsername(s string) *RefreshTokenCreate { + rtc.mutation.SetClaimsUsername(s) + return rtc +} + +// SetClaimsEmail sets the "claims_email" field. +func (rtc *RefreshTokenCreate) SetClaimsEmail(s string) *RefreshTokenCreate { + rtc.mutation.SetClaimsEmail(s) + return rtc +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (rtc *RefreshTokenCreate) SetClaimsEmailVerified(b bool) *RefreshTokenCreate { + rtc.mutation.SetClaimsEmailVerified(b) + return rtc +} + +// SetClaimsGroups sets the "claims_groups" field. +func (rtc *RefreshTokenCreate) SetClaimsGroups(s []string) *RefreshTokenCreate { + rtc.mutation.SetClaimsGroups(s) + return rtc +} + +// SetClaimsPreferredUsername sets the "claims_preferred_username" field. 
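// Sketch (aside) recapping the And/Or/Not combinators defined at the end of
// the predicate file above: they clone the selector, so grouped conditions
// get their own parentheses in the generated SQL. A hypothetical
// "rotated or idle" filter, reusing the cutoff from the earlier sketch:
//
//	p := refreshtoken.Or(
//		refreshtoken.ObsoleteTokenNEQ(""),
//		refreshtoken.Not(refreshtoken.LastUsedGT(cutoff)),
//	)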
+func (rtc *RefreshTokenCreate) SetClaimsPreferredUsername(s string) *RefreshTokenCreate { + rtc.mutation.SetClaimsPreferredUsername(s) + return rtc +} + +// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil. +func (rtc *RefreshTokenCreate) SetNillableClaimsPreferredUsername(s *string) *RefreshTokenCreate { + if s != nil { + rtc.SetClaimsPreferredUsername(*s) + } + return rtc +} + +// SetConnectorID sets the "connector_id" field. +func (rtc *RefreshTokenCreate) SetConnectorID(s string) *RefreshTokenCreate { + rtc.mutation.SetConnectorID(s) + return rtc +} + +// SetConnectorData sets the "connector_data" field. +func (rtc *RefreshTokenCreate) SetConnectorData(b []byte) *RefreshTokenCreate { + rtc.mutation.SetConnectorData(b) + return rtc +} + +// SetToken sets the "token" field. +func (rtc *RefreshTokenCreate) SetToken(s string) *RefreshTokenCreate { + rtc.mutation.SetToken(s) + return rtc +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (rtc *RefreshTokenCreate) SetNillableToken(s *string) *RefreshTokenCreate { + if s != nil { + rtc.SetToken(*s) + } + return rtc +} + +// SetObsoleteToken sets the "obsolete_token" field. +func (rtc *RefreshTokenCreate) SetObsoleteToken(s string) *RefreshTokenCreate { + rtc.mutation.SetObsoleteToken(s) + return rtc +} + +// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil. +func (rtc *RefreshTokenCreate) SetNillableObsoleteToken(s *string) *RefreshTokenCreate { + if s != nil { + rtc.SetObsoleteToken(*s) + } + return rtc +} + +// SetCreatedAt sets the "created_at" field. +func (rtc *RefreshTokenCreate) SetCreatedAt(t time.Time) *RefreshTokenCreate { + rtc.mutation.SetCreatedAt(t) + return rtc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (rtc *RefreshTokenCreate) SetNillableCreatedAt(t *time.Time) *RefreshTokenCreate { + if t != nil { + rtc.SetCreatedAt(*t) + } + return rtc +} + +// SetLastUsed sets the "last_used" field. +func (rtc *RefreshTokenCreate) SetLastUsed(t time.Time) *RefreshTokenCreate { + rtc.mutation.SetLastUsed(t) + return rtc +} + +// SetNillableLastUsed sets the "last_used" field if the given value is not nil. +func (rtc *RefreshTokenCreate) SetNillableLastUsed(t *time.Time) *RefreshTokenCreate { + if t != nil { + rtc.SetLastUsed(*t) + } + return rtc +} + +// SetID sets the "id" field. +func (rtc *RefreshTokenCreate) SetID(s string) *RefreshTokenCreate { + rtc.mutation.SetID(s) + return rtc +} + +// Mutation returns the RefreshTokenMutation object of the builder. +func (rtc *RefreshTokenCreate) Mutation() *RefreshTokenMutation { + return rtc.mutation +} + +// Save creates the RefreshToken in the database. +func (rtc *RefreshTokenCreate) Save(ctx context.Context) (*RefreshToken, error) { + rtc.defaults() + return withHooks(ctx, rtc.sqlSave, rtc.mutation, rtc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (rtc *RefreshTokenCreate) SaveX(ctx context.Context) *RefreshToken { + v, err := rtc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (rtc *RefreshTokenCreate) Exec(ctx context.Context) error { + _, err := rtc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (rtc *RefreshTokenCreate) ExecX(ctx context.Context) { + if err := rtc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
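// Sketch (aside): RefreshTokenCreate is driven fluently; Save runs defaults()
// and check() before inserting, and SaveX/ExecX panic instead of returning the
// error. client.RefreshToken.Create() is assumed as the entry point (defined
// elsewhere in this package):
//
//	rt, err := client.RefreshToken.Create().
//		SetID("rt-1").
//		SetClientID("example-app").
//		SetNonce("n").
//		SetClaimsUserID("u1").
//		SetClaimsUsername("alice").
//		SetClaimsEmail("alice@example.com").
//		SetClaimsEmailVerified(true).
//		SetConnectorID("local").
//		Save(ctx)
//
// Fields left unset here (token, obsolete_token, created_at, last_used,
// claims_preferred_username) are filled in by defaults() below.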
+func (rtc *RefreshTokenCreate) defaults() { + if _, ok := rtc.mutation.ClaimsPreferredUsername(); !ok { + v := refreshtoken.DefaultClaimsPreferredUsername + rtc.mutation.SetClaimsPreferredUsername(v) + } + if _, ok := rtc.mutation.Token(); !ok { + v := refreshtoken.DefaultToken + rtc.mutation.SetToken(v) + } + if _, ok := rtc.mutation.ObsoleteToken(); !ok { + v := refreshtoken.DefaultObsoleteToken + rtc.mutation.SetObsoleteToken(v) + } + if _, ok := rtc.mutation.CreatedAt(); !ok { + v := refreshtoken.DefaultCreatedAt() + rtc.mutation.SetCreatedAt(v) + } + if _, ok := rtc.mutation.LastUsed(); !ok { + v := refreshtoken.DefaultLastUsed() + rtc.mutation.SetLastUsed(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (rtc *RefreshTokenCreate) check() error { + if _, ok := rtc.mutation.ClientID(); !ok { + return &ValidationError{Name: "client_id", err: errors.New(`db: missing required field "RefreshToken.client_id"`)} + } + if v, ok := rtc.mutation.ClientID(); ok { + if err := refreshtoken.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.client_id": %w`, err)} + } + } + if _, ok := rtc.mutation.Nonce(); !ok { + return &ValidationError{Name: "nonce", err: errors.New(`db: missing required field "RefreshToken.nonce"`)} + } + if v, ok := rtc.mutation.Nonce(); ok { + if err := refreshtoken.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "RefreshToken.nonce": %w`, err)} + } + } + if _, ok := rtc.mutation.ClaimsUserID(); !ok { + return &ValidationError{Name: "claims_user_id", err: errors.New(`db: missing required field "RefreshToken.claims_user_id"`)} + } + if v, ok := rtc.mutation.ClaimsUserID(); ok { + if err := refreshtoken.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_user_id": %w`, err)} + } + } + if _, ok := rtc.mutation.ClaimsUsername(); !ok { + return &ValidationError{Name: "claims_username", err: errors.New(`db: missing required field "RefreshToken.claims_username"`)} + } + if v, ok := rtc.mutation.ClaimsUsername(); ok { + if err := refreshtoken.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_username": %w`, err)} + } + } + if _, ok := rtc.mutation.ClaimsEmail(); !ok { + return &ValidationError{Name: "claims_email", err: errors.New(`db: missing required field "RefreshToken.claims_email"`)} + } + if v, ok := rtc.mutation.ClaimsEmail(); ok { + if err := refreshtoken.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_email": %w`, err)} + } + } + if _, ok := rtc.mutation.ClaimsEmailVerified(); !ok { + return &ValidationError{Name: "claims_email_verified", err: errors.New(`db: missing required field "RefreshToken.claims_email_verified"`)} + } + if _, ok := rtc.mutation.ClaimsPreferredUsername(); !ok { + return &ValidationError{Name: "claims_preferred_username", err: errors.New(`db: missing required field "RefreshToken.claims_preferred_username"`)} + } + if _, ok := rtc.mutation.ConnectorID(); !ok { + return &ValidationError{Name: "connector_id", err: errors.New(`db: missing required field "RefreshToken.connector_id"`)} + } + if v, ok := rtc.mutation.ConnectorID(); ok { + if 
err := refreshtoken.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.connector_id": %w`, err)} + } + } + if _, ok := rtc.mutation.Token(); !ok { + return &ValidationError{Name: "token", err: errors.New(`db: missing required field "RefreshToken.token"`)} + } + if _, ok := rtc.mutation.ObsoleteToken(); !ok { + return &ValidationError{Name: "obsolete_token", err: errors.New(`db: missing required field "RefreshToken.obsolete_token"`)} + } + if _, ok := rtc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`db: missing required field "RefreshToken.created_at"`)} + } + if _, ok := rtc.mutation.LastUsed(); !ok { + return &ValidationError{Name: "last_used", err: errors.New(`db: missing required field "RefreshToken.last_used"`)} + } + if v, ok := rtc.mutation.ID(); ok { + if err := refreshtoken.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.id": %w`, err)} + } + } + return nil +} + +func (rtc *RefreshTokenCreate) sqlSave(ctx context.Context) (*RefreshToken, error) { + if err := rtc.check(); err != nil { + return nil, err + } + _node, _spec := rtc.createSpec() + if err := sqlgraph.CreateNode(ctx, rtc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected RefreshToken.ID type: %T", _spec.ID.Value) + } + } + rtc.mutation.id = &_node.ID + rtc.mutation.done = true + return _node, nil +} + +func (rtc *RefreshTokenCreate) createSpec() (*RefreshToken, *sqlgraph.CreateSpec) { + var ( + _node = &RefreshToken{config: rtc.config} + _spec = sqlgraph.NewCreateSpec(refreshtoken.Table, sqlgraph.NewFieldSpec(refreshtoken.FieldID, field.TypeString)) + ) + if id, ok := rtc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := rtc.mutation.ClientID(); ok { + _spec.SetField(refreshtoken.FieldClientID, field.TypeString, value) + _node.ClientID = value + } + if value, ok := rtc.mutation.Scopes(); ok { + _spec.SetField(refreshtoken.FieldScopes, field.TypeJSON, value) + _node.Scopes = value + } + if value, ok := rtc.mutation.Nonce(); ok { + _spec.SetField(refreshtoken.FieldNonce, field.TypeString, value) + _node.Nonce = value + } + if value, ok := rtc.mutation.ClaimsUserID(); ok { + _spec.SetField(refreshtoken.FieldClaimsUserID, field.TypeString, value) + _node.ClaimsUserID = value + } + if value, ok := rtc.mutation.ClaimsUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsUsername, field.TypeString, value) + _node.ClaimsUsername = value + } + if value, ok := rtc.mutation.ClaimsEmail(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmail, field.TypeString, value) + _node.ClaimsEmail = value + } + if value, ok := rtc.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmailVerified, field.TypeBool, value) + _node.ClaimsEmailVerified = value + } + if value, ok := rtc.mutation.ClaimsGroups(); ok { + _spec.SetField(refreshtoken.FieldClaimsGroups, field.TypeJSON, value) + _node.ClaimsGroups = value + } + if value, ok := rtc.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsPreferredUsername, field.TypeString, value) + _node.ClaimsPreferredUsername = value + } + if value, ok := 
rtc.mutation.ConnectorID(); ok { + _spec.SetField(refreshtoken.FieldConnectorID, field.TypeString, value) + _node.ConnectorID = value + } + if value, ok := rtc.mutation.ConnectorData(); ok { + _spec.SetField(refreshtoken.FieldConnectorData, field.TypeBytes, value) + _node.ConnectorData = &value + } + if value, ok := rtc.mutation.Token(); ok { + _spec.SetField(refreshtoken.FieldToken, field.TypeString, value) + _node.Token = value + } + if value, ok := rtc.mutation.ObsoleteToken(); ok { + _spec.SetField(refreshtoken.FieldObsoleteToken, field.TypeString, value) + _node.ObsoleteToken = value + } + if value, ok := rtc.mutation.CreatedAt(); ok { + _spec.SetField(refreshtoken.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := rtc.mutation.LastUsed(); ok { + _spec.SetField(refreshtoken.FieldLastUsed, field.TypeTime, value) + _node.LastUsed = value + } + return _node, _spec +} + +// RefreshTokenCreateBulk is the builder for creating many RefreshToken entities in bulk. +type RefreshTokenCreateBulk struct { + config + builders []*RefreshTokenCreate +} + +// Save creates the RefreshToken entities in the database. +func (rtcb *RefreshTokenCreateBulk) Save(ctx context.Context) ([]*RefreshToken, error) { + specs := make([]*sqlgraph.CreateSpec, len(rtcb.builders)) + nodes := make([]*RefreshToken, len(rtcb.builders)) + mutators := make([]Mutator, len(rtcb.builders)) + for i := range rtcb.builders { + func(i int, root context.Context) { + builder := rtcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*RefreshTokenMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, rtcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, rtcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, rtcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (rtcb *RefreshTokenCreateBulk) SaveX(ctx context.Context) []*RefreshToken { + v, err := rtcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (rtcb *RefreshTokenCreateBulk) Exec(ctx context.Context) error { + _, err := rtcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
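// Sketch (aside): RefreshTokenCreateBulk chains each builder's hooks into one
// mutator pipeline and finishes with a single sqlgraph.BatchCreate call.
// Assuming a CreateBulk entry point on the client (not shown in this file):
//
//	// b1..b3 are *db.RefreshTokenCreate builders prepared as in the earlier sketch.
//	tokens, err := client.RefreshToken.CreateBulk(b1, b2, b3).Save(ctx)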
+func (rtcb *RefreshTokenCreateBulk) ExecX(ctx context.Context) { + if err := rtcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_delete.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_delete.go new file mode 100644 index 00000000..78c8cbc6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/predicate" + "github.com/dexidp/dex/storage/ent/db/refreshtoken" +) + +// RefreshTokenDelete is the builder for deleting a RefreshToken entity. +type RefreshTokenDelete struct { + config + hooks []Hook + mutation *RefreshTokenMutation +} + +// Where appends a list predicates to the RefreshTokenDelete builder. +func (rtd *RefreshTokenDelete) Where(ps ...predicate.RefreshToken) *RefreshTokenDelete { + rtd.mutation.Where(ps...) + return rtd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (rtd *RefreshTokenDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, rtd.sqlExec, rtd.mutation, rtd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (rtd *RefreshTokenDelete) ExecX(ctx context.Context) int { + n, err := rtd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (rtd *RefreshTokenDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(refreshtoken.Table, sqlgraph.NewFieldSpec(refreshtoken.FieldID, field.TypeString)) + if ps := rtd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, rtd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + rtd.mutation.done = true + return affected, err +} + +// RefreshTokenDeleteOne is the builder for deleting a single RefreshToken entity. +type RefreshTokenDeleteOne struct { + rtd *RefreshTokenDelete +} + +// Where appends a list predicates to the RefreshTokenDelete builder. +func (rtdo *RefreshTokenDeleteOne) Where(ps ...predicate.RefreshToken) *RefreshTokenDeleteOne { + rtdo.rtd.mutation.Where(ps...) + return rtdo +} + +// Exec executes the deletion query. +func (rtdo *RefreshTokenDeleteOne) Exec(ctx context.Context) error { + n, err := rtdo.rtd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{refreshtoken.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (rtdo *RefreshTokenDeleteOne) ExecX(ctx context.Context) { + if err := rtdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_query.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_query.go new file mode 100644 index 00000000..ed6c3cc7 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/dexidp/dex/storage/ent/db/predicate" + "github.com/dexidp/dex/storage/ent/db/refreshtoken" +) + +// RefreshTokenQuery is the builder for querying RefreshToken entities. +type RefreshTokenQuery struct { + config + ctx *QueryContext + order []refreshtoken.OrderOption + inters []Interceptor + predicates []predicate.RefreshToken + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the RefreshTokenQuery builder. +func (rtq *RefreshTokenQuery) Where(ps ...predicate.RefreshToken) *RefreshTokenQuery { + rtq.predicates = append(rtq.predicates, ps...) + return rtq +} + +// Limit the number of records to be returned by this query. +func (rtq *RefreshTokenQuery) Limit(limit int) *RefreshTokenQuery { + rtq.ctx.Limit = &limit + return rtq +} + +// Offset to start from. +func (rtq *RefreshTokenQuery) Offset(offset int) *RefreshTokenQuery { + rtq.ctx.Offset = &offset + return rtq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (rtq *RefreshTokenQuery) Unique(unique bool) *RefreshTokenQuery { + rtq.ctx.Unique = &unique + return rtq +} + +// Order specifies how the records should be ordered. +func (rtq *RefreshTokenQuery) Order(o ...refreshtoken.OrderOption) *RefreshTokenQuery { + rtq.order = append(rtq.order, o...) + return rtq +} + +// First returns the first RefreshToken entity from the query. +// Returns a *NotFoundError when no RefreshToken was found. +func (rtq *RefreshTokenQuery) First(ctx context.Context) (*RefreshToken, error) { + nodes, err := rtq.Limit(1).All(setContextOp(ctx, rtq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{refreshtoken.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (rtq *RefreshTokenQuery) FirstX(ctx context.Context) *RefreshToken { + node, err := rtq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first RefreshToken ID from the query. +// Returns a *NotFoundError when no RefreshToken ID was found. +func (rtq *RefreshTokenQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = rtq.Limit(1).IDs(setContextOp(ctx, rtq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{refreshtoken.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (rtq *RefreshTokenQuery) FirstIDX(ctx context.Context) string { + id, err := rtq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single RefreshToken entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one RefreshToken entity is found. +// Returns a *NotFoundError when no RefreshToken entities are found. 
+func (rtq *RefreshTokenQuery) Only(ctx context.Context) (*RefreshToken, error) { + nodes, err := rtq.Limit(2).All(setContextOp(ctx, rtq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{refreshtoken.Label} + default: + return nil, &NotSingularError{refreshtoken.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (rtq *RefreshTokenQuery) OnlyX(ctx context.Context) *RefreshToken { + node, err := rtq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only RefreshToken ID in the query. +// Returns a *NotSingularError when more than one RefreshToken ID is found. +// Returns a *NotFoundError when no entities are found. +func (rtq *RefreshTokenQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = rtq.Limit(2).IDs(setContextOp(ctx, rtq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{refreshtoken.Label} + default: + err = &NotSingularError{refreshtoken.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (rtq *RefreshTokenQuery) OnlyIDX(ctx context.Context) string { + id, err := rtq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of RefreshTokens. +func (rtq *RefreshTokenQuery) All(ctx context.Context) ([]*RefreshToken, error) { + ctx = setContextOp(ctx, rtq.ctx, "All") + if err := rtq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*RefreshToken, *RefreshTokenQuery]() + return withInterceptors[[]*RefreshToken](ctx, rtq, qr, rtq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (rtq *RefreshTokenQuery) AllX(ctx context.Context) []*RefreshToken { + nodes, err := rtq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of RefreshToken IDs. +func (rtq *RefreshTokenQuery) IDs(ctx context.Context) (ids []string, err error) { + if rtq.ctx.Unique == nil && rtq.path != nil { + rtq.Unique(true) + } + ctx = setContextOp(ctx, rtq.ctx, "IDs") + if err = rtq.Select(refreshtoken.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (rtq *RefreshTokenQuery) IDsX(ctx context.Context) []string { + ids, err := rtq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (rtq *RefreshTokenQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, rtq.ctx, "Count") + if err := rtq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, rtq, querierCount[*RefreshTokenQuery](), rtq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (rtq *RefreshTokenQuery) CountX(ctx context.Context) int { + count, err := rtq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (rtq *RefreshTokenQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, rtq.ctx, "Exist") + switch _, err := rtq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
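// Sketch (aside) of the terminal methods above: First returns the first match
// or a *NotFoundError, Only additionally returns a *NotSingularError when more
// than one row matches, and Exist is a cheap existence probe built on FirstID.
// Hypothetical token value:
//
//	ok, err := client.RefreshToken.Query().
//		Where(refreshtoken.TokenEQ("t")).
//		Exist(ctx)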
+func (rtq *RefreshTokenQuery) ExistX(ctx context.Context) bool { + exist, err := rtq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the RefreshTokenQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (rtq *RefreshTokenQuery) Clone() *RefreshTokenQuery { + if rtq == nil { + return nil + } + return &RefreshTokenQuery{ + config: rtq.config, + ctx: rtq.ctx.Clone(), + order: append([]refreshtoken.OrderOption{}, rtq.order...), + inters: append([]Interceptor{}, rtq.inters...), + predicates: append([]predicate.RefreshToken{}, rtq.predicates...), + // clone intermediate query. + sql: rtq.sql.Clone(), + path: rtq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.RefreshToken.Query(). +// GroupBy(refreshtoken.FieldClientID). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (rtq *RefreshTokenQuery) GroupBy(field string, fields ...string) *RefreshTokenGroupBy { + rtq.ctx.Fields = append([]string{field}, fields...) + grbuild := &RefreshTokenGroupBy{build: rtq} + grbuild.flds = &rtq.ctx.Fields + grbuild.label = refreshtoken.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// ClientID string `json:"client_id,omitempty"` +// } +// +// client.RefreshToken.Query(). +// Select(refreshtoken.FieldClientID). +// Scan(ctx, &v) +func (rtq *RefreshTokenQuery) Select(fields ...string) *RefreshTokenSelect { + rtq.ctx.Fields = append(rtq.ctx.Fields, fields...) + sbuild := &RefreshTokenSelect{RefreshTokenQuery: rtq} + sbuild.label = refreshtoken.Label + sbuild.flds, sbuild.scan = &rtq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RefreshTokenSelect configured with the given aggregations. +func (rtq *RefreshTokenQuery) Aggregate(fns ...AggregateFunc) *RefreshTokenSelect { + return rtq.Select().Aggregate(fns...) 
+} + +func (rtq *RefreshTokenQuery) prepareQuery(ctx context.Context) error { + for _, inter := range rtq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, rtq); err != nil { + return err + } + } + } + for _, f := range rtq.ctx.Fields { + if !refreshtoken.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if rtq.path != nil { + prev, err := rtq.path(ctx) + if err != nil { + return err + } + rtq.sql = prev + } + return nil +} + +func (rtq *RefreshTokenQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RefreshToken, error) { + var ( + nodes = []*RefreshToken{} + _spec = rtq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*RefreshToken).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &RefreshToken{config: rtq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, rtq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (rtq *RefreshTokenQuery) sqlCount(ctx context.Context) (int, error) { + _spec := rtq.querySpec() + _spec.Node.Columns = rtq.ctx.Fields + if len(rtq.ctx.Fields) > 0 { + _spec.Unique = rtq.ctx.Unique != nil && *rtq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, rtq.driver, _spec) +} + +func (rtq *RefreshTokenQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(refreshtoken.Table, refreshtoken.Columns, sqlgraph.NewFieldSpec(refreshtoken.FieldID, field.TypeString)) + _spec.From = rtq.sql + if unique := rtq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if rtq.path != nil { + _spec.Unique = true + } + if fields := rtq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, refreshtoken.FieldID) + for i := range fields { + if fields[i] != refreshtoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := rtq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := rtq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := rtq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := rtq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (rtq *RefreshTokenQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(rtq.driver.Dialect()) + t1 := builder.Table(refreshtoken.Table) + columns := rtq.ctx.Fields + if len(columns) == 0 { + columns = refreshtoken.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if rtq.sql != nil { + selector = rtq.sql + selector.Select(selector.Columns(columns...)...) + } + if rtq.ctx.Unique != nil && *rtq.ctx.Unique { + selector.Distinct() + } + for _, p := range rtq.predicates { + p(selector) + } + for _, p := range rtq.order { + p(selector) + } + if offset := rtq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := rtq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// RefreshTokenGroupBy is the group-by builder for RefreshToken entities. +type RefreshTokenGroupBy struct { + selector + build *RefreshTokenQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (rtgb *RefreshTokenGroupBy) Aggregate(fns ...AggregateFunc) *RefreshTokenGroupBy { + rtgb.fns = append(rtgb.fns, fns...) + return rtgb +} + +// Scan applies the selector query and scans the result into the given value. +func (rtgb *RefreshTokenGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rtgb.build.ctx, "GroupBy") + if err := rtgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RefreshTokenQuery, *RefreshTokenGroupBy](ctx, rtgb.build, rtgb, rtgb.build.inters, v) +} + +func (rtgb *RefreshTokenGroupBy) sqlScan(ctx context.Context, root *RefreshTokenQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(rtgb.fns)) + for _, fn := range rtgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*rtgb.flds)+len(rtgb.fns)) + for _, f := range *rtgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*rtgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := rtgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// RefreshTokenSelect is the builder for selecting fields of RefreshToken entities. +type RefreshTokenSelect struct { + *RefreshTokenQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (rts *RefreshTokenSelect) Aggregate(fns ...AggregateFunc) *RefreshTokenSelect { + rts.fns = append(rts.fns, fns...) + return rts +} + +// Scan applies the selector query and scans the result into the given value. +func (rts *RefreshTokenSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rts.ctx, "Select") + if err := rts.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RefreshTokenQuery, *RefreshTokenSelect](ctx, rts.RefreshTokenQuery, rts, rts.inters, v) +} + +func (rts *RefreshTokenSelect) sqlScan(ctx context.Context, root *RefreshTokenQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(rts.fns)) + for _, fn := range rts.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*rts.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := rts.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_update.go b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_update.go new file mode 100644 index 00000000..6d9e0221 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/refreshtoken_update.go @@ -0,0 +1,701 @@ +// Code generated by ent, DO NOT EDIT. 
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/dialect/sql/sqljson"
+	"entgo.io/ent/schema/field"
+	"github.com/dexidp/dex/storage/ent/db/predicate"
+	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
+)
+
+// RefreshTokenUpdate is the builder for updating RefreshToken entities.
+type RefreshTokenUpdate struct {
+	config
+	hooks    []Hook
+	mutation *RefreshTokenMutation
+}
+
+// Where appends a list of predicates to the RefreshTokenUpdate builder.
+func (rtu *RefreshTokenUpdate) Where(ps ...predicate.RefreshToken) *RefreshTokenUpdate {
+	rtu.mutation.Where(ps...)
+	return rtu
+}
+
+// SetClientID sets the "client_id" field.
+func (rtu *RefreshTokenUpdate) SetClientID(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetClientID(s)
+	return rtu
+}
+
+// SetScopes sets the "scopes" field.
+func (rtu *RefreshTokenUpdate) SetScopes(s []string) *RefreshTokenUpdate {
+	rtu.mutation.SetScopes(s)
+	return rtu
+}
+
+// AppendScopes appends s to the "scopes" field.
+func (rtu *RefreshTokenUpdate) AppendScopes(s []string) *RefreshTokenUpdate {
+	rtu.mutation.AppendScopes(s)
+	return rtu
+}
+
+// ClearScopes clears the value of the "scopes" field.
+func (rtu *RefreshTokenUpdate) ClearScopes() *RefreshTokenUpdate {
+	rtu.mutation.ClearScopes()
+	return rtu
+}
+
+// SetNonce sets the "nonce" field.
+func (rtu *RefreshTokenUpdate) SetNonce(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetNonce(s)
+	return rtu
+}
+
+// SetClaimsUserID sets the "claims_user_id" field.
+func (rtu *RefreshTokenUpdate) SetClaimsUserID(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsUserID(s)
+	return rtu
+}
+
+// SetClaimsUsername sets the "claims_username" field.
+func (rtu *RefreshTokenUpdate) SetClaimsUsername(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsUsername(s)
+	return rtu
+}
+
+// SetClaimsEmail sets the "claims_email" field.
+func (rtu *RefreshTokenUpdate) SetClaimsEmail(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsEmail(s)
+	return rtu
+}
+
+// SetClaimsEmailVerified sets the "claims_email_verified" field.
+func (rtu *RefreshTokenUpdate) SetClaimsEmailVerified(b bool) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsEmailVerified(b)
+	return rtu
+}
+
+// SetClaimsGroups sets the "claims_groups" field.
+func (rtu *RefreshTokenUpdate) SetClaimsGroups(s []string) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsGroups(s)
+	return rtu
+}
+
+// AppendClaimsGroups appends s to the "claims_groups" field.
+func (rtu *RefreshTokenUpdate) AppendClaimsGroups(s []string) *RefreshTokenUpdate {
+	rtu.mutation.AppendClaimsGroups(s)
+	return rtu
+}
+
+// ClearClaimsGroups clears the value of the "claims_groups" field.
+func (rtu *RefreshTokenUpdate) ClearClaimsGroups() *RefreshTokenUpdate {
+	rtu.mutation.ClearClaimsGroups()
+	return rtu
+}
+
+// SetClaimsPreferredUsername sets the "claims_preferred_username" field.
+func (rtu *RefreshTokenUpdate) SetClaimsPreferredUsername(s string) *RefreshTokenUpdate {
+	rtu.mutation.SetClaimsPreferredUsername(s)
+	return rtu
+}
+
+// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil.
+func (rtu *RefreshTokenUpdate) SetNillableClaimsPreferredUsername(s *string) *RefreshTokenUpdate {
+	if s != nil {
+		rtu.SetClaimsPreferredUsername(*s)
+	}
+	return rtu
+}
+
+// SetConnectorID sets the "connector_id" field.
+func (rtu *RefreshTokenUpdate) SetConnectorID(s string) *RefreshTokenUpdate { + rtu.mutation.SetConnectorID(s) + return rtu +} + +// SetConnectorData sets the "connector_data" field. +func (rtu *RefreshTokenUpdate) SetConnectorData(b []byte) *RefreshTokenUpdate { + rtu.mutation.SetConnectorData(b) + return rtu +} + +// ClearConnectorData clears the value of the "connector_data" field. +func (rtu *RefreshTokenUpdate) ClearConnectorData() *RefreshTokenUpdate { + rtu.mutation.ClearConnectorData() + return rtu +} + +// SetToken sets the "token" field. +func (rtu *RefreshTokenUpdate) SetToken(s string) *RefreshTokenUpdate { + rtu.mutation.SetToken(s) + return rtu +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (rtu *RefreshTokenUpdate) SetNillableToken(s *string) *RefreshTokenUpdate { + if s != nil { + rtu.SetToken(*s) + } + return rtu +} + +// SetObsoleteToken sets the "obsolete_token" field. +func (rtu *RefreshTokenUpdate) SetObsoleteToken(s string) *RefreshTokenUpdate { + rtu.mutation.SetObsoleteToken(s) + return rtu +} + +// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil. +func (rtu *RefreshTokenUpdate) SetNillableObsoleteToken(s *string) *RefreshTokenUpdate { + if s != nil { + rtu.SetObsoleteToken(*s) + } + return rtu +} + +// SetCreatedAt sets the "created_at" field. +func (rtu *RefreshTokenUpdate) SetCreatedAt(t time.Time) *RefreshTokenUpdate { + rtu.mutation.SetCreatedAt(t) + return rtu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (rtu *RefreshTokenUpdate) SetNillableCreatedAt(t *time.Time) *RefreshTokenUpdate { + if t != nil { + rtu.SetCreatedAt(*t) + } + return rtu +} + +// SetLastUsed sets the "last_used" field. +func (rtu *RefreshTokenUpdate) SetLastUsed(t time.Time) *RefreshTokenUpdate { + rtu.mutation.SetLastUsed(t) + return rtu +} + +// SetNillableLastUsed sets the "last_used" field if the given value is not nil. +func (rtu *RefreshTokenUpdate) SetNillableLastUsed(t *time.Time) *RefreshTokenUpdate { + if t != nil { + rtu.SetLastUsed(*t) + } + return rtu +} + +// Mutation returns the RefreshTokenMutation object of the builder. +func (rtu *RefreshTokenUpdate) Mutation() *RefreshTokenMutation { + return rtu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (rtu *RefreshTokenUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, rtu.sqlSave, rtu.mutation, rtu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (rtu *RefreshTokenUpdate) SaveX(ctx context.Context) int { + affected, err := rtu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (rtu *RefreshTokenUpdate) Exec(ctx context.Context) error { + _, err := rtu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (rtu *RefreshTokenUpdate) ExecX(ctx context.Context) { + if err := rtu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (rtu *RefreshTokenUpdate) check() error { + if v, ok := rtu.mutation.ClientID(); ok { + if err := refreshtoken.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.client_id": %w`, err)} + } + } + if v, ok := rtu.mutation.Nonce(); ok { + if err := refreshtoken.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "RefreshToken.nonce": %w`, err)} + } + } + if v, ok := rtu.mutation.ClaimsUserID(); ok { + if err := refreshtoken.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_user_id": %w`, err)} + } + } + if v, ok := rtu.mutation.ClaimsUsername(); ok { + if err := refreshtoken.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_username": %w`, err)} + } + } + if v, ok := rtu.mutation.ClaimsEmail(); ok { + if err := refreshtoken.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_email": %w`, err)} + } + } + if v, ok := rtu.mutation.ConnectorID(); ok { + if err := refreshtoken.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.connector_id": %w`, err)} + } + } + return nil +} + +func (rtu *RefreshTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := rtu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(refreshtoken.Table, refreshtoken.Columns, sqlgraph.NewFieldSpec(refreshtoken.FieldID, field.TypeString)) + if ps := rtu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := rtu.mutation.ClientID(); ok { + _spec.SetField(refreshtoken.FieldClientID, field.TypeString, value) + } + if value, ok := rtu.mutation.Scopes(); ok { + _spec.SetField(refreshtoken.FieldScopes, field.TypeJSON, value) + } + if value, ok := rtu.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, refreshtoken.FieldScopes, value) + }) + } + if rtu.mutation.ScopesCleared() { + _spec.ClearField(refreshtoken.FieldScopes, field.TypeJSON) + } + if value, ok := rtu.mutation.Nonce(); ok { + _spec.SetField(refreshtoken.FieldNonce, field.TypeString, value) + } + if value, ok := rtu.mutation.ClaimsUserID(); ok { + _spec.SetField(refreshtoken.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := rtu.mutation.ClaimsUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := rtu.mutation.ClaimsEmail(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := rtu.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := rtu.mutation.ClaimsGroups(); ok { + _spec.SetField(refreshtoken.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := rtu.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, refreshtoken.FieldClaimsGroups, value) + }) + } + if rtu.mutation.ClaimsGroupsCleared() { + _spec.ClearField(refreshtoken.FieldClaimsGroups, 
field.TypeJSON) + } + if value, ok := rtu.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := rtu.mutation.ConnectorID(); ok { + _spec.SetField(refreshtoken.FieldConnectorID, field.TypeString, value) + } + if value, ok := rtu.mutation.ConnectorData(); ok { + _spec.SetField(refreshtoken.FieldConnectorData, field.TypeBytes, value) + } + if rtu.mutation.ConnectorDataCleared() { + _spec.ClearField(refreshtoken.FieldConnectorData, field.TypeBytes) + } + if value, ok := rtu.mutation.Token(); ok { + _spec.SetField(refreshtoken.FieldToken, field.TypeString, value) + } + if value, ok := rtu.mutation.ObsoleteToken(); ok { + _spec.SetField(refreshtoken.FieldObsoleteToken, field.TypeString, value) + } + if value, ok := rtu.mutation.CreatedAt(); ok { + _spec.SetField(refreshtoken.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := rtu.mutation.LastUsed(); ok { + _spec.SetField(refreshtoken.FieldLastUsed, field.TypeTime, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, rtu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{refreshtoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + rtu.mutation.done = true + return n, nil +} + +// RefreshTokenUpdateOne is the builder for updating a single RefreshToken entity. +type RefreshTokenUpdateOne struct { + config + fields []string + hooks []Hook + mutation *RefreshTokenMutation +} + +// SetClientID sets the "client_id" field. +func (rtuo *RefreshTokenUpdateOne) SetClientID(s string) *RefreshTokenUpdateOne { + rtuo.mutation.SetClientID(s) + return rtuo +} + +// SetScopes sets the "scopes" field. +func (rtuo *RefreshTokenUpdateOne) SetScopes(s []string) *RefreshTokenUpdateOne { + rtuo.mutation.SetScopes(s) + return rtuo +} + +// AppendScopes appends s to the "scopes" field. +func (rtuo *RefreshTokenUpdateOne) AppendScopes(s []string) *RefreshTokenUpdateOne { + rtuo.mutation.AppendScopes(s) + return rtuo +} + +// ClearScopes clears the value of the "scopes" field. +func (rtuo *RefreshTokenUpdateOne) ClearScopes() *RefreshTokenUpdateOne { + rtuo.mutation.ClearScopes() + return rtuo +} + +// SetNonce sets the "nonce" field. +func (rtuo *RefreshTokenUpdateOne) SetNonce(s string) *RefreshTokenUpdateOne { + rtuo.mutation.SetNonce(s) + return rtuo +} + +// SetClaimsUserID sets the "claims_user_id" field. +func (rtuo *RefreshTokenUpdateOne) SetClaimsUserID(s string) *RefreshTokenUpdateOne { + rtuo.mutation.SetClaimsUserID(s) + return rtuo +} + +// SetClaimsUsername sets the "claims_username" field. +func (rtuo *RefreshTokenUpdateOne) SetClaimsUsername(s string) *RefreshTokenUpdateOne { + rtuo.mutation.SetClaimsUsername(s) + return rtuo +} + +// SetClaimsEmail sets the "claims_email" field. +func (rtuo *RefreshTokenUpdateOne) SetClaimsEmail(s string) *RefreshTokenUpdateOne { + rtuo.mutation.SetClaimsEmail(s) + return rtuo +} + +// SetClaimsEmailVerified sets the "claims_email_verified" field. +func (rtuo *RefreshTokenUpdateOne) SetClaimsEmailVerified(b bool) *RefreshTokenUpdateOne { + rtuo.mutation.SetClaimsEmailVerified(b) + return rtuo +} + +// SetClaimsGroups sets the "claims_groups" field. +func (rtuo *RefreshTokenUpdateOne) SetClaimsGroups(s []string) *RefreshTokenUpdateOne { + rtuo.mutation.SetClaimsGroups(s) + return rtuo +} + +// AppendClaimsGroups appends s to the "claims_groups" field. 
+func (rtuo *RefreshTokenUpdateOne) AppendClaimsGroups(s []string) *RefreshTokenUpdateOne {
+	rtuo.mutation.AppendClaimsGroups(s)
+	return rtuo
+}
+
+// ClearClaimsGroups clears the value of the "claims_groups" field.
+func (rtuo *RefreshTokenUpdateOne) ClearClaimsGroups() *RefreshTokenUpdateOne {
+	rtuo.mutation.ClearClaimsGroups()
+	return rtuo
+}
+
+// SetClaimsPreferredUsername sets the "claims_preferred_username" field.
+func (rtuo *RefreshTokenUpdateOne) SetClaimsPreferredUsername(s string) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetClaimsPreferredUsername(s)
+	return rtuo
+}
+
+// SetNillableClaimsPreferredUsername sets the "claims_preferred_username" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableClaimsPreferredUsername(s *string) *RefreshTokenUpdateOne {
+	if s != nil {
+		rtuo.SetClaimsPreferredUsername(*s)
+	}
+	return rtuo
+}
+
+// SetConnectorID sets the "connector_id" field.
+func (rtuo *RefreshTokenUpdateOne) SetConnectorID(s string) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetConnectorID(s)
+	return rtuo
+}
+
+// SetConnectorData sets the "connector_data" field.
+func (rtuo *RefreshTokenUpdateOne) SetConnectorData(b []byte) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetConnectorData(b)
+	return rtuo
+}
+
+// ClearConnectorData clears the value of the "connector_data" field.
+func (rtuo *RefreshTokenUpdateOne) ClearConnectorData() *RefreshTokenUpdateOne {
+	rtuo.mutation.ClearConnectorData()
+	return rtuo
+}
+
+// SetToken sets the "token" field.
+func (rtuo *RefreshTokenUpdateOne) SetToken(s string) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetToken(s)
+	return rtuo
+}
+
+// SetNillableToken sets the "token" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableToken(s *string) *RefreshTokenUpdateOne {
+	if s != nil {
+		rtuo.SetToken(*s)
+	}
+	return rtuo
+}
+
+// SetObsoleteToken sets the "obsolete_token" field.
+func (rtuo *RefreshTokenUpdateOne) SetObsoleteToken(s string) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetObsoleteToken(s)
+	return rtuo
+}
+
+// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableObsoleteToken(s *string) *RefreshTokenUpdateOne {
+	if s != nil {
+		rtuo.SetObsoleteToken(*s)
+	}
+	return rtuo
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (rtuo *RefreshTokenUpdateOne) SetCreatedAt(t time.Time) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetCreatedAt(t)
+	return rtuo
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableCreatedAt(t *time.Time) *RefreshTokenUpdateOne {
+	if t != nil {
+		rtuo.SetCreatedAt(*t)
+	}
+	return rtuo
+}
+
+// SetLastUsed sets the "last_used" field.
+func (rtuo *RefreshTokenUpdateOne) SetLastUsed(t time.Time) *RefreshTokenUpdateOne {
+	rtuo.mutation.SetLastUsed(t)
+	return rtuo
+}
+
+// SetNillableLastUsed sets the "last_used" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableLastUsed(t *time.Time) *RefreshTokenUpdateOne {
+	if t != nil {
+		rtuo.SetLastUsed(*t)
+	}
+	return rtuo
+}
+
+// Mutation returns the RefreshTokenMutation object of the builder.
+func (rtuo *RefreshTokenUpdateOne) Mutation() *RefreshTokenMutation {
+	return rtuo.mutation
+}
+
+// Where appends a list of predicates to the RefreshTokenUpdateOne builder.
+func (rtuo *RefreshTokenUpdateOne) Where(ps ...predicate.RefreshToken) *RefreshTokenUpdateOne {
+	rtuo.mutation.Where(ps...)
+ return rtuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (rtuo *RefreshTokenUpdateOne) Select(field string, fields ...string) *RefreshTokenUpdateOne { + rtuo.fields = append([]string{field}, fields...) + return rtuo +} + +// Save executes the query and returns the updated RefreshToken entity. +func (rtuo *RefreshTokenUpdateOne) Save(ctx context.Context) (*RefreshToken, error) { + return withHooks(ctx, rtuo.sqlSave, rtuo.mutation, rtuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (rtuo *RefreshTokenUpdateOne) SaveX(ctx context.Context) *RefreshToken { + node, err := rtuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (rtuo *RefreshTokenUpdateOne) Exec(ctx context.Context) error { + _, err := rtuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (rtuo *RefreshTokenUpdateOne) ExecX(ctx context.Context) { + if err := rtuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (rtuo *RefreshTokenUpdateOne) check() error { + if v, ok := rtuo.mutation.ClientID(); ok { + if err := refreshtoken.ClientIDValidator(v); err != nil { + return &ValidationError{Name: "client_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.client_id": %w`, err)} + } + } + if v, ok := rtuo.mutation.Nonce(); ok { + if err := refreshtoken.NonceValidator(v); err != nil { + return &ValidationError{Name: "nonce", err: fmt.Errorf(`db: validator failed for field "RefreshToken.nonce": %w`, err)} + } + } + if v, ok := rtuo.mutation.ClaimsUserID(); ok { + if err := refreshtoken.ClaimsUserIDValidator(v); err != nil { + return &ValidationError{Name: "claims_user_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_user_id": %w`, err)} + } + } + if v, ok := rtuo.mutation.ClaimsUsername(); ok { + if err := refreshtoken.ClaimsUsernameValidator(v); err != nil { + return &ValidationError{Name: "claims_username", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_username": %w`, err)} + } + } + if v, ok := rtuo.mutation.ClaimsEmail(); ok { + if err := refreshtoken.ClaimsEmailValidator(v); err != nil { + return &ValidationError{Name: "claims_email", err: fmt.Errorf(`db: validator failed for field "RefreshToken.claims_email": %w`, err)} + } + } + if v, ok := rtuo.mutation.ConnectorID(); ok { + if err := refreshtoken.ConnectorIDValidator(v); err != nil { + return &ValidationError{Name: "connector_id", err: fmt.Errorf(`db: validator failed for field "RefreshToken.connector_id": %w`, err)} + } + } + return nil +} + +func (rtuo *RefreshTokenUpdateOne) sqlSave(ctx context.Context) (_node *RefreshToken, err error) { + if err := rtuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(refreshtoken.Table, refreshtoken.Columns, sqlgraph.NewFieldSpec(refreshtoken.FieldID, field.TypeString)) + id, ok := rtuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "RefreshToken.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := rtuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, refreshtoken.FieldID) + for _, f := range fields { + if !refreshtoken.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: 
fmt.Errorf("db: invalid field %q for query", f)} + } + if f != refreshtoken.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := rtuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := rtuo.mutation.ClientID(); ok { + _spec.SetField(refreshtoken.FieldClientID, field.TypeString, value) + } + if value, ok := rtuo.mutation.Scopes(); ok { + _spec.SetField(refreshtoken.FieldScopes, field.TypeJSON, value) + } + if value, ok := rtuo.mutation.AppendedScopes(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, refreshtoken.FieldScopes, value) + }) + } + if rtuo.mutation.ScopesCleared() { + _spec.ClearField(refreshtoken.FieldScopes, field.TypeJSON) + } + if value, ok := rtuo.mutation.Nonce(); ok { + _spec.SetField(refreshtoken.FieldNonce, field.TypeString, value) + } + if value, ok := rtuo.mutation.ClaimsUserID(); ok { + _spec.SetField(refreshtoken.FieldClaimsUserID, field.TypeString, value) + } + if value, ok := rtuo.mutation.ClaimsUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsUsername, field.TypeString, value) + } + if value, ok := rtuo.mutation.ClaimsEmail(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmail, field.TypeString, value) + } + if value, ok := rtuo.mutation.ClaimsEmailVerified(); ok { + _spec.SetField(refreshtoken.FieldClaimsEmailVerified, field.TypeBool, value) + } + if value, ok := rtuo.mutation.ClaimsGroups(); ok { + _spec.SetField(refreshtoken.FieldClaimsGroups, field.TypeJSON, value) + } + if value, ok := rtuo.mutation.AppendedClaimsGroups(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, refreshtoken.FieldClaimsGroups, value) + }) + } + if rtuo.mutation.ClaimsGroupsCleared() { + _spec.ClearField(refreshtoken.FieldClaimsGroups, field.TypeJSON) + } + if value, ok := rtuo.mutation.ClaimsPreferredUsername(); ok { + _spec.SetField(refreshtoken.FieldClaimsPreferredUsername, field.TypeString, value) + } + if value, ok := rtuo.mutation.ConnectorID(); ok { + _spec.SetField(refreshtoken.FieldConnectorID, field.TypeString, value) + } + if value, ok := rtuo.mutation.ConnectorData(); ok { + _spec.SetField(refreshtoken.FieldConnectorData, field.TypeBytes, value) + } + if rtuo.mutation.ConnectorDataCleared() { + _spec.ClearField(refreshtoken.FieldConnectorData, field.TypeBytes) + } + if value, ok := rtuo.mutation.Token(); ok { + _spec.SetField(refreshtoken.FieldToken, field.TypeString, value) + } + if value, ok := rtuo.mutation.ObsoleteToken(); ok { + _spec.SetField(refreshtoken.FieldObsoleteToken, field.TypeString, value) + } + if value, ok := rtuo.mutation.CreatedAt(); ok { + _spec.SetField(refreshtoken.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := rtuo.mutation.LastUsed(); ok { + _spec.SetField(refreshtoken.FieldLastUsed, field.TypeTime, value) + } + _node = &RefreshToken{config: rtuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, rtuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{refreshtoken.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + rtuo.mutation.done = true + return _node, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/runtime.go b/vendor/github.com/dexidp/dex/storage/ent/db/runtime.go new file mode 100644 index 00000000..797c9761 --- 
/dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/db/runtime.go
@@ -0,0 +1,269 @@
+// Code generated by ent, DO NOT EDIT.
+
+package db
+
+import (
+	"time"
+
+	"github.com/dexidp/dex/storage/ent/db/authcode"
+	"github.com/dexidp/dex/storage/ent/db/authrequest"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/devicerequest"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/offlinesession"
+	"github.com/dexidp/dex/storage/ent/db/password"
+	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
+	"github.com/dexidp/dex/storage/ent/schema"
+)
+
+// The init function reads all schema descriptors with runtime code
+// (default values, validators, hooks and policies) and stitches them
+// to their package variables.
+func init() {
+	authcodeFields := schema.AuthCode{}.Fields()
+	_ = authcodeFields
+	// authcodeDescClientID is the schema descriptor for client_id field.
+	authcodeDescClientID := authcodeFields[1].Descriptor()
+	// authcode.ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save.
+	authcode.ClientIDValidator = authcodeDescClientID.Validators[0].(func(string) error)
+	// authcodeDescNonce is the schema descriptor for nonce field.
+	authcodeDescNonce := authcodeFields[3].Descriptor()
+	// authcode.NonceValidator is a validator for the "nonce" field. It is called by the builders before save.
+	authcode.NonceValidator = authcodeDescNonce.Validators[0].(func(string) error)
+	// authcodeDescRedirectURI is the schema descriptor for redirect_uri field.
+	authcodeDescRedirectURI := authcodeFields[4].Descriptor()
+	// authcode.RedirectURIValidator is a validator for the "redirect_uri" field. It is called by the builders before save.
+	authcode.RedirectURIValidator = authcodeDescRedirectURI.Validators[0].(func(string) error)
+	// authcodeDescClaimsUserID is the schema descriptor for claims_user_id field.
+	authcodeDescClaimsUserID := authcodeFields[5].Descriptor()
+	// authcode.ClaimsUserIDValidator is a validator for the "claims_user_id" field. It is called by the builders before save.
+	authcode.ClaimsUserIDValidator = authcodeDescClaimsUserID.Validators[0].(func(string) error)
+	// authcodeDescClaimsUsername is the schema descriptor for claims_username field.
+	authcodeDescClaimsUsername := authcodeFields[6].Descriptor()
+	// authcode.ClaimsUsernameValidator is a validator for the "claims_username" field. It is called by the builders before save.
+	authcode.ClaimsUsernameValidator = authcodeDescClaimsUsername.Validators[0].(func(string) error)
+	// authcodeDescClaimsEmail is the schema descriptor for claims_email field.
+	authcodeDescClaimsEmail := authcodeFields[7].Descriptor()
+	// authcode.ClaimsEmailValidator is a validator for the "claims_email" field. It is called by the builders before save.
+	authcode.ClaimsEmailValidator = authcodeDescClaimsEmail.Validators[0].(func(string) error)
+	// authcodeDescClaimsPreferredUsername is the schema descriptor for claims_preferred_username field.
+	authcodeDescClaimsPreferredUsername := authcodeFields[10].Descriptor()
+	// authcode.DefaultClaimsPreferredUsername holds the default value on creation for the claims_preferred_username field.
+	authcode.DefaultClaimsPreferredUsername = authcodeDescClaimsPreferredUsername.Default.(string)
+	// authcodeDescConnectorID is the schema descriptor for connector_id field.
+ authcodeDescConnectorID := authcodeFields[11].Descriptor() + // authcode.ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + authcode.ConnectorIDValidator = authcodeDescConnectorID.Validators[0].(func(string) error) + // authcodeDescCodeChallenge is the schema descriptor for code_challenge field. + authcodeDescCodeChallenge := authcodeFields[14].Descriptor() + // authcode.DefaultCodeChallenge holds the default value on creation for the code_challenge field. + authcode.DefaultCodeChallenge = authcodeDescCodeChallenge.Default.(string) + // authcodeDescCodeChallengeMethod is the schema descriptor for code_challenge_method field. + authcodeDescCodeChallengeMethod := authcodeFields[15].Descriptor() + // authcode.DefaultCodeChallengeMethod holds the default value on creation for the code_challenge_method field. + authcode.DefaultCodeChallengeMethod = authcodeDescCodeChallengeMethod.Default.(string) + // authcodeDescID is the schema descriptor for id field. + authcodeDescID := authcodeFields[0].Descriptor() + // authcode.IDValidator is a validator for the "id" field. It is called by the builders before save. + authcode.IDValidator = authcodeDescID.Validators[0].(func(string) error) + authrequestFields := schema.AuthRequest{}.Fields() + _ = authrequestFields + // authrequestDescClaimsPreferredUsername is the schema descriptor for claims_preferred_username field. + authrequestDescClaimsPreferredUsername := authrequestFields[14].Descriptor() + // authrequest.DefaultClaimsPreferredUsername holds the default value on creation for the claims_preferred_username field. + authrequest.DefaultClaimsPreferredUsername = authrequestDescClaimsPreferredUsername.Default.(string) + // authrequestDescCodeChallenge is the schema descriptor for code_challenge field. + authrequestDescCodeChallenge := authrequestFields[18].Descriptor() + // authrequest.DefaultCodeChallenge holds the default value on creation for the code_challenge field. + authrequest.DefaultCodeChallenge = authrequestDescCodeChallenge.Default.(string) + // authrequestDescCodeChallengeMethod is the schema descriptor for code_challenge_method field. + authrequestDescCodeChallengeMethod := authrequestFields[19].Descriptor() + // authrequest.DefaultCodeChallengeMethod holds the default value on creation for the code_challenge_method field. + authrequest.DefaultCodeChallengeMethod = authrequestDescCodeChallengeMethod.Default.(string) + // authrequestDescID is the schema descriptor for id field. + authrequestDescID := authrequestFields[0].Descriptor() + // authrequest.IDValidator is a validator for the "id" field. It is called by the builders before save. + authrequest.IDValidator = authrequestDescID.Validators[0].(func(string) error) + connectorFields := schema.Connector{}.Fields() + _ = connectorFields + // connectorDescType is the schema descriptor for type field. + connectorDescType := connectorFields[1].Descriptor() + // connector.TypeValidator is a validator for the "type" field. It is called by the builders before save. + connector.TypeValidator = connectorDescType.Validators[0].(func(string) error) + // connectorDescName is the schema descriptor for name field. + connectorDescName := connectorFields[2].Descriptor() + // connector.NameValidator is a validator for the "name" field. It is called by the builders before save. + connector.NameValidator = connectorDescName.Validators[0].(func(string) error) + // connectorDescID is the schema descriptor for id field. 
+ connectorDescID := connectorFields[0].Descriptor() + // connector.IDValidator is a validator for the "id" field. It is called by the builders before save. + connector.IDValidator = func() func(string) error { + validators := connectorDescID.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(id string) error { + for _, fn := range fns { + if err := fn(id); err != nil { + return err + } + } + return nil + } + }() + devicerequestFields := schema.DeviceRequest{}.Fields() + _ = devicerequestFields + // devicerequestDescUserCode is the schema descriptor for user_code field. + devicerequestDescUserCode := devicerequestFields[0].Descriptor() + // devicerequest.UserCodeValidator is a validator for the "user_code" field. It is called by the builders before save. + devicerequest.UserCodeValidator = devicerequestDescUserCode.Validators[0].(func(string) error) + // devicerequestDescDeviceCode is the schema descriptor for device_code field. + devicerequestDescDeviceCode := devicerequestFields[1].Descriptor() + // devicerequest.DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save. + devicerequest.DeviceCodeValidator = devicerequestDescDeviceCode.Validators[0].(func(string) error) + // devicerequestDescClientID is the schema descriptor for client_id field. + devicerequestDescClientID := devicerequestFields[2].Descriptor() + // devicerequest.ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save. + devicerequest.ClientIDValidator = devicerequestDescClientID.Validators[0].(func(string) error) + // devicerequestDescClientSecret is the schema descriptor for client_secret field. + devicerequestDescClientSecret := devicerequestFields[3].Descriptor() + // devicerequest.ClientSecretValidator is a validator for the "client_secret" field. It is called by the builders before save. + devicerequest.ClientSecretValidator = devicerequestDescClientSecret.Validators[0].(func(string) error) + devicetokenFields := schema.DeviceToken{}.Fields() + _ = devicetokenFields + // devicetokenDescDeviceCode is the schema descriptor for device_code field. + devicetokenDescDeviceCode := devicetokenFields[0].Descriptor() + // devicetoken.DeviceCodeValidator is a validator for the "device_code" field. It is called by the builders before save. + devicetoken.DeviceCodeValidator = devicetokenDescDeviceCode.Validators[0].(func(string) error) + // devicetokenDescStatus is the schema descriptor for status field. + devicetokenDescStatus := devicetokenFields[1].Descriptor() + // devicetoken.StatusValidator is a validator for the "status" field. It is called by the builders before save. + devicetoken.StatusValidator = devicetokenDescStatus.Validators[0].(func(string) error) + // devicetokenDescCodeChallenge is the schema descriptor for code_challenge field. + devicetokenDescCodeChallenge := devicetokenFields[6].Descriptor() + // devicetoken.DefaultCodeChallenge holds the default value on creation for the code_challenge field. + devicetoken.DefaultCodeChallenge = devicetokenDescCodeChallenge.Default.(string) + // devicetokenDescCodeChallengeMethod is the schema descriptor for code_challenge_method field. + devicetokenDescCodeChallengeMethod := devicetokenFields[7].Descriptor() + // devicetoken.DefaultCodeChallengeMethod holds the default value on creation for the code_challenge_method field. 
+ devicetoken.DefaultCodeChallengeMethod = devicetokenDescCodeChallengeMethod.Default.(string) + keysFields := schema.Keys{}.Fields() + _ = keysFields + // keysDescID is the schema descriptor for id field. + keysDescID := keysFields[0].Descriptor() + // keys.IDValidator is a validator for the "id" field. It is called by the builders before save. + keys.IDValidator = keysDescID.Validators[0].(func(string) error) + oauth2clientFields := schema.OAuth2Client{}.Fields() + _ = oauth2clientFields + // oauth2clientDescSecret is the schema descriptor for secret field. + oauth2clientDescSecret := oauth2clientFields[1].Descriptor() + // oauth2client.SecretValidator is a validator for the "secret" field. It is called by the builders before save. + oauth2client.SecretValidator = oauth2clientDescSecret.Validators[0].(func(string) error) + // oauth2clientDescName is the schema descriptor for name field. + oauth2clientDescName := oauth2clientFields[5].Descriptor() + // oauth2client.NameValidator is a validator for the "name" field. It is called by the builders before save. + oauth2client.NameValidator = oauth2clientDescName.Validators[0].(func(string) error) + // oauth2clientDescLogoURL is the schema descriptor for logo_url field. + oauth2clientDescLogoURL := oauth2clientFields[6].Descriptor() + // oauth2client.LogoURLValidator is a validator for the "logo_url" field. It is called by the builders before save. + oauth2client.LogoURLValidator = oauth2clientDescLogoURL.Validators[0].(func(string) error) + // oauth2clientDescID is the schema descriptor for id field. + oauth2clientDescID := oauth2clientFields[0].Descriptor() + // oauth2client.IDValidator is a validator for the "id" field. It is called by the builders before save. + oauth2client.IDValidator = func() func(string) error { + validators := oauth2clientDescID.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(id string) error { + for _, fn := range fns { + if err := fn(id); err != nil { + return err + } + } + return nil + } + }() + offlinesessionFields := schema.OfflineSession{}.Fields() + _ = offlinesessionFields + // offlinesessionDescUserID is the schema descriptor for user_id field. + offlinesessionDescUserID := offlinesessionFields[1].Descriptor() + // offlinesession.UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + offlinesession.UserIDValidator = offlinesessionDescUserID.Validators[0].(func(string) error) + // offlinesessionDescConnID is the schema descriptor for conn_id field. + offlinesessionDescConnID := offlinesessionFields[2].Descriptor() + // offlinesession.ConnIDValidator is a validator for the "conn_id" field. It is called by the builders before save. + offlinesession.ConnIDValidator = offlinesessionDescConnID.Validators[0].(func(string) error) + // offlinesessionDescID is the schema descriptor for id field. + offlinesessionDescID := offlinesessionFields[0].Descriptor() + // offlinesession.IDValidator is a validator for the "id" field. It is called by the builders before save. + offlinesession.IDValidator = offlinesessionDescID.Validators[0].(func(string) error) + passwordFields := schema.Password{}.Fields() + _ = passwordFields + // passwordDescEmail is the schema descriptor for email field. + passwordDescEmail := passwordFields[0].Descriptor() + // password.EmailValidator is a validator for the "email" field. It is called by the builders before save. 
+ password.EmailValidator = passwordDescEmail.Validators[0].(func(string) error) + // passwordDescUsername is the schema descriptor for username field. + passwordDescUsername := passwordFields[2].Descriptor() + // password.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + password.UsernameValidator = passwordDescUsername.Validators[0].(func(string) error) + // passwordDescUserID is the schema descriptor for user_id field. + passwordDescUserID := passwordFields[3].Descriptor() + // password.UserIDValidator is a validator for the "user_id" field. It is called by the builders before save. + password.UserIDValidator = passwordDescUserID.Validators[0].(func(string) error) + refreshtokenFields := schema.RefreshToken{}.Fields() + _ = refreshtokenFields + // refreshtokenDescClientID is the schema descriptor for client_id field. + refreshtokenDescClientID := refreshtokenFields[1].Descriptor() + // refreshtoken.ClientIDValidator is a validator for the "client_id" field. It is called by the builders before save. + refreshtoken.ClientIDValidator = refreshtokenDescClientID.Validators[0].(func(string) error) + // refreshtokenDescNonce is the schema descriptor for nonce field. + refreshtokenDescNonce := refreshtokenFields[3].Descriptor() + // refreshtoken.NonceValidator is a validator for the "nonce" field. It is called by the builders before save. + refreshtoken.NonceValidator = refreshtokenDescNonce.Validators[0].(func(string) error) + // refreshtokenDescClaimsUserID is the schema descriptor for claims_user_id field. + refreshtokenDescClaimsUserID := refreshtokenFields[4].Descriptor() + // refreshtoken.ClaimsUserIDValidator is a validator for the "claims_user_id" field. It is called by the builders before save. + refreshtoken.ClaimsUserIDValidator = refreshtokenDescClaimsUserID.Validators[0].(func(string) error) + // refreshtokenDescClaimsUsername is the schema descriptor for claims_username field. + refreshtokenDescClaimsUsername := refreshtokenFields[5].Descriptor() + // refreshtoken.ClaimsUsernameValidator is a validator for the "claims_username" field. It is called by the builders before save. + refreshtoken.ClaimsUsernameValidator = refreshtokenDescClaimsUsername.Validators[0].(func(string) error) + // refreshtokenDescClaimsEmail is the schema descriptor for claims_email field. + refreshtokenDescClaimsEmail := refreshtokenFields[6].Descriptor() + // refreshtoken.ClaimsEmailValidator is a validator for the "claims_email" field. It is called by the builders before save. + refreshtoken.ClaimsEmailValidator = refreshtokenDescClaimsEmail.Validators[0].(func(string) error) + // refreshtokenDescClaimsPreferredUsername is the schema descriptor for claims_preferred_username field. + refreshtokenDescClaimsPreferredUsername := refreshtokenFields[9].Descriptor() + // refreshtoken.DefaultClaimsPreferredUsername holds the default value on creation for the claims_preferred_username field. + refreshtoken.DefaultClaimsPreferredUsername = refreshtokenDescClaimsPreferredUsername.Default.(string) + // refreshtokenDescConnectorID is the schema descriptor for connector_id field. + refreshtokenDescConnectorID := refreshtokenFields[10].Descriptor() + // refreshtoken.ConnectorIDValidator is a validator for the "connector_id" field. It is called by the builders before save. + refreshtoken.ConnectorIDValidator = refreshtokenDescConnectorID.Validators[0].(func(string) error) + // refreshtokenDescToken is the schema descriptor for token field. 
+ refreshtokenDescToken := refreshtokenFields[12].Descriptor() + // refreshtoken.DefaultToken holds the default value on creation for the token field. + refreshtoken.DefaultToken = refreshtokenDescToken.Default.(string) + // refreshtokenDescObsoleteToken is the schema descriptor for obsolete_token field. + refreshtokenDescObsoleteToken := refreshtokenFields[13].Descriptor() + // refreshtoken.DefaultObsoleteToken holds the default value on creation for the obsolete_token field. + refreshtoken.DefaultObsoleteToken = refreshtokenDescObsoleteToken.Default.(string) + // refreshtokenDescCreatedAt is the schema descriptor for created_at field. + refreshtokenDescCreatedAt := refreshtokenFields[14].Descriptor() + // refreshtoken.DefaultCreatedAt holds the default value on creation for the created_at field. + refreshtoken.DefaultCreatedAt = refreshtokenDescCreatedAt.Default.(func() time.Time) + // refreshtokenDescLastUsed is the schema descriptor for last_used field. + refreshtokenDescLastUsed := refreshtokenFields[15].Descriptor() + // refreshtoken.DefaultLastUsed holds the default value on creation for the last_used field. + refreshtoken.DefaultLastUsed = refreshtokenDescLastUsed.Default.(func() time.Time) + // refreshtokenDescID is the schema descriptor for id field. + refreshtokenDescID := refreshtokenFields[0].Descriptor() + // refreshtoken.IDValidator is a validator for the "id" field. It is called by the builders before save. + refreshtoken.IDValidator = refreshtokenDescID.Validators[0].(func(string) error) +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/db/tx.go b/vendor/github.com/dexidp/dex/storage/ent/db/tx.go new file mode 100644 index 00000000..42ba241a --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/db/tx.go @@ -0,0 +1,237 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // AuthCode is the client for interacting with the AuthCode builders. + AuthCode *AuthCodeClient + // AuthRequest is the client for interacting with the AuthRequest builders. + AuthRequest *AuthRequestClient + // Connector is the client for interacting with the Connector builders. + Connector *ConnectorClient + // DeviceRequest is the client for interacting with the DeviceRequest builders. + DeviceRequest *DeviceRequestClient + // DeviceToken is the client for interacting with the DeviceToken builders. + DeviceToken *DeviceTokenClient + // Keys is the client for interacting with the Keys builders. + Keys *KeysClient + // OAuth2Client is the client for interacting with the OAuth2Client builders. + OAuth2Client *OAuth2ClientClient + // OfflineSession is the client for interacting with the OfflineSession builders. + OfflineSession *OfflineSessionClient + // Password is the client for interacting with the Password builders. + Password *PasswordClient + // RefreshToken is the client for interacting with the RefreshToken builders. + RefreshToken *RefreshTokenClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. 
If f is a function with the appropriate
+	// signature, CommitFunc(f) is a Committer that calls f.
+	CommitFunc func(context.Context, *Tx) error
+
+	// CommitHook defines the "commit middleware". A function that gets a Committer
+	// and returns a Committer. For example:
+	//
+	//	hook := func(next ent.Committer) ent.Committer {
+	//		return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Commit(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	CommitHook func(Committer) Committer
+)
+
+// Commit calls f(ctx, tx).
+func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Commit commits the transaction.
+func (tx *Tx) Commit() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Committer = CommitFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Commit()
+	})
+	txDriver.mu.Lock()
+	hooks := append([]CommitHook(nil), txDriver.onCommit...)
+	txDriver.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Commit(tx.ctx, tx)
+}
+
+// OnCommit adds a hook to call on commit.
+func (tx *Tx) OnCommit(f CommitHook) {
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onCommit = append(txDriver.onCommit, f)
+	txDriver.mu.Unlock()
+}
+
+type (
+	// Rollbacker is the interface that wraps the Rollback method.
+	Rollbacker interface {
+		Rollback(context.Context, *Tx) error
+	}
+
+	// The RollbackFunc type is an adapter to allow the use of ordinary
+	// function as a Rollbacker. If f is a function with the appropriate
+	// signature, RollbackFunc(f) is a Rollbacker that calls f.
+	RollbackFunc func(context.Context, *Tx) error
+
+	// RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
+	// and returns a Rollbacker. For example:
+	//
+	//	hook := func(next ent.Rollbacker) ent.Rollbacker {
+	//		return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Rollback(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	RollbackHook func(Rollbacker) Rollbacker
+)
+
+// Rollback calls f(ctx, tx).
+func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Rollback rolls back the transaction.
+func (tx *Tx) Rollback() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Rollback()
+	})
+	txDriver.mu.Lock()
+	hooks := append([]RollbackHook(nil), txDriver.onRollback...)
+	txDriver.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Rollback(tx.ctx, tx)
+}
+
+// OnRollback adds a hook to call on rollback.
+func (tx *Tx) OnRollback(f RollbackHook) {
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onRollback = append(txDriver.onRollback, f)
+	txDriver.mu.Unlock()
+}
+
+// Client returns a Client that binds to the current transaction.
+func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.AuthCode = NewAuthCodeClient(tx.config) + tx.AuthRequest = NewAuthRequestClient(tx.config) + tx.Connector = NewConnectorClient(tx.config) + tx.DeviceRequest = NewDeviceRequestClient(tx.config) + tx.DeviceToken = NewDeviceTokenClient(tx.config) + tx.Keys = NewKeysClient(tx.config) + tx.OAuth2Client = NewOAuth2ClientClient(tx.config) + tx.OfflineSession = NewOfflineSessionClient(tx.config) + tx.Password = NewPasswordClient(tx.config) + tx.RefreshToken = NewRefreshTokenClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: AuthCode.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Query(ctx, query, args, v)
+}
+
+var _ dialect.Driver = (*txDriver)(nil)
diff --git a/vendor/github.com/dexidp/dex/storage/ent/generate.go b/vendor/github.com/dexidp/dex/storage/ent/generate.go
new file mode 100644
index 00000000..805e8cdb
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/generate.go
@@ -0,0 +1,3 @@
+package ent
+
+//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema --target ./db
diff --git a/vendor/github.com/dexidp/dex/storage/ent/mysql.go b/vendor/github.com/dexidp/dex/storage/ent/mysql.go
new file mode 100644
index 00000000..4a9407f9
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/mysql.go
@@ -0,0 +1,158 @@
+package ent
+
+import (
+	"context"
+	"crypto/sha256"
+	"crypto/tls"
+	"crypto/x509"
+	"database/sql"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"time"
+
+	entSQL "entgo.io/ent/dialect/sql"
+	"github.com/go-sql-driver/mysql" // Register mysql driver.
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/ent/client"
+	"github.com/dexidp/dex/storage/ent/db"
+)
+
+const (
+	// MySQL SSL modes
+	mysqlSSLTrue       = "true"
+	mysqlSSLFalse      = "false"
+	mysqlSSLSkipVerify = "skip-verify"
+	mysqlSSLCustom     = "custom"
+)
+
+// MySQL options for creating an SQL db.
+type MySQL struct {
+	NetworkDB
+
+	SSL SSL `json:"ssl"`
+
+	params map[string]string
+}
+
+// Open always returns a new MySQL-backed storage.
+func (m *MySQL) Open(logger log.Logger) (storage.Storage, error) {
+	logger.Debug("experimental ent-based storage driver is enabled")
+	drv, err := m.driver()
+	if err != nil {
+		return nil, err
+	}
+
+	databaseClient := client.NewDatabase(
+		client.WithClient(db.NewClient(db.Driver(drv))),
+		client.WithHasher(sha256.New),
+		// Set tx isolation level for each transaction as dex does for postgres
+		client.WithTxIsolationLevel(sql.LevelSerializable),
+	)
+
+	if err := databaseClient.Schema().Create(context.TODO()); err != nil {
+		return nil, err
+	}
+
+	return databaseClient, nil
+}
+
+func (m *MySQL) driver() (*entSQL.Driver, error) {
+	var tlsConfig string
+
+	switch {
+	case m.SSL.CAFile != "" || m.SSL.CertFile != "" || m.SSL.KeyFile != "":
+		if err := m.makeTLSConfig(); err != nil {
+			return nil, fmt.Errorf("failed to make TLS config: %v", err)
+		}
+		tlsConfig = mysqlSSLCustom
+	case m.SSL.Mode == "":
+		tlsConfig = mysqlSSLTrue
+	default:
+		tlsConfig = m.SSL.Mode
+	}
+
+	drv, err := entSQL.Open("mysql", m.dsn(tlsConfig))
+	if err != nil {
+		return nil, err
+	}
+
+	if m.MaxIdleConns == 0 {
+		/* Override default behaviour to fix https://github.com/dexidp/dex/issues/1608 */
+		drv.DB().SetMaxIdleConns(0)
+	} else {
+		drv.DB().SetMaxIdleConns(m.MaxIdleConns)
+	}
+
+	return drv, nil
+}
+
+func (m *MySQL) dsn(tlsConfig string) string {
+	cfg := mysql.Config{
+		User:                 m.User,
+		Passwd:               m.Password,
+		DBName:               m.Database,
+		AllowNativePasswords: true,
+
+		Timeout: time.Second * time.Duration(m.ConnectionTimeout),
+
+		TLSConfig: tlsConfig,
+
+		ParseTime: true,
+		Params:    make(map[string]string),
+	}
+
+	if m.Host != "" {
+		if m.Host[0] != '/' {
+			cfg.Net = "tcp"
+			cfg.Addr = m.Host
+
+			if m.Port != 0 {
+				cfg.Addr = net.JoinHostPort(m.Host, strconv.Itoa(int(m.Port)))
+			}
+		} else {
+			cfg.Net = "unix"
+			cfg.Addr = m.Host
+		}
+	}
+
+	for k, v := range m.params {
+		cfg.Params[k] = v
+	}
+
+	return cfg.FormatDSN()
+}
+
+func (m *MySQL) makeTLSConfig() error {
+	cfg := &tls.Config{}
+
+	if m.SSL.CAFile != "" {
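+		// Build a root CA pool from the configured CA file so that the server certificate can be verified against it.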
+		rootCertPool := x509.NewCertPool()
+
+		pem, err := os.ReadFile(m.SSL.CAFile)
+		if err != nil {
+			return err
+		}
+
+		if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+			return fmt.Errorf("failed to append PEM")
+		}
+		cfg.RootCAs = rootCertPool
+	}
+
+	if m.SSL.CertFile != "" && m.SSL.KeyFile != "" {
+		clientCert := make([]tls.Certificate, 0, 1)
+		certs, err := tls.LoadX509KeyPair(m.SSL.CertFile, m.SSL.KeyFile)
+		if err != nil {
+			return err
+		}
+		clientCert = append(clientCert, certs)
+		cfg.Certificates = clientCert
+	}
+
+	return mysql.RegisterTLSConfig(mysqlSSLCustom, cfg)
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/postgres.go b/vendor/github.com/dexidp/dex/storage/ent/postgres.go
new file mode 100644
index 00000000..ac091e70
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/postgres.go
@@ -0,0 +1,154 @@
+package ent
+
+import (
+	"context"
+	"crypto/sha256"
+	"database/sql"
+	"fmt"
+	"net"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	entSQL "entgo.io/ent/dialect/sql"
+	_ "github.com/lib/pq" // Register postgres driver.
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/ent/client"
+	"github.com/dexidp/dex/storage/ent/db"
+)
+
+//nolint
+const (
+	// postgres SSL modes
+	pgSSLDisable    = "disable"
+	pgSSLRequire    = "require"
+	pgSSLVerifyCA   = "verify-ca"
+	pgSSLVerifyFull = "verify-full"
+)
+
+// Postgres options for creating an SQL db.
+type Postgres struct {
+	NetworkDB
+
+	SSL SSL `json:"ssl"`
+}
+
+// Open always returns a new Postgres-backed storage.
+func (p *Postgres) Open(logger log.Logger) (storage.Storage, error) {
+	logger.Debug("experimental ent-based storage driver is enabled")
+	drv, err := p.driver()
+	if err != nil {
+		return nil, err
+	}
+
+	databaseClient := client.NewDatabase(
+		client.WithClient(db.NewClient(db.Driver(drv))),
+		client.WithHasher(sha256.New),
+		// The default behavior for Postgres transactions is consistent reads, not consistent writes.
+		// For each transaction opened, ensure it has the correct isolation level.
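+		// LevelSerializable makes concurrent transactions behave as if they ran one at a time; callers may need to retry on serialization failures.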
+ // + // See: https://www.postgresql.org/docs/9.3/static/sql-set-transaction.html + client.WithTxIsolationLevel(sql.LevelSerializable), + ) + + if err := databaseClient.Schema().Create(context.TODO()); err != nil { + return nil, err + } + + return databaseClient, nil +} + +func (p *Postgres) driver() (*entSQL.Driver, error) { + drv, err := entSQL.Open("postgres", p.dsn()) + if err != nil { + return nil, err + } + + // set database/sql tunables if configured + if p.ConnMaxLifetime != 0 { + drv.DB().SetConnMaxLifetime(time.Duration(p.ConnMaxLifetime) * time.Second) + } + + if p.MaxIdleConns == 0 { + drv.DB().SetMaxIdleConns(5) + } else { + drv.DB().SetMaxIdleConns(p.MaxIdleConns) + } + + if p.MaxOpenConns == 0 { + drv.DB().SetMaxOpenConns(5) + } else { + drv.DB().SetMaxOpenConns(p.MaxOpenConns) + } + + return drv, nil +} + +func (p *Postgres) dsn() string { + // detect host:port for backwards-compatibility + host, port, err := net.SplitHostPort(p.Host) + if err != nil { + // not host:port, probably unix socket or bare address + host = p.Host + if p.Port != 0 { + port = strconv.Itoa(int(p.Port)) + } + } + + var parameters []string + addParam := func(key, val string) { + parameters = append(parameters, fmt.Sprintf("%s=%s", key, val)) + } + + addParam("connect_timeout", strconv.Itoa(p.ConnectionTimeout)) + + if host != "" { + addParam("host", dataSourceStr(host)) + } + + if port != "" { + addParam("port", port) + } + + if p.User != "" { + addParam("user", dataSourceStr(p.User)) + } + + if p.Password != "" { + addParam("password", dataSourceStr(p.Password)) + } + + if p.Database != "" { + addParam("dbname", dataSourceStr(p.Database)) + } + + if p.SSL.Mode == "" { + // Assume the strictest mode if unspecified. + addParam("sslmode", dataSourceStr(pgSSLVerifyFull)) + } else { + addParam("sslmode", dataSourceStr(p.SSL.Mode)) + } + + if p.SSL.CAFile != "" { + addParam("sslrootcert", dataSourceStr(p.SSL.CAFile)) + } + + if p.SSL.CertFile != "" { + addParam("sslcert", dataSourceStr(p.SSL.CertFile)) + } + + if p.SSL.KeyFile != "" { + addParam("sslkey", dataSourceStr(p.SSL.KeyFile)) + } + + return strings.Join(parameters, " ") +} + +var strEsc = regexp.MustCompile(`([\\'])`) + +func dataSourceStr(str string) string { + return "'" + strEsc.ReplaceAllString(str, `\$1`) + "'" +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/BUILD b/vendor/github.com/dexidp/dex/storage/ent/schema/BUILD new file mode 100644 index 00000000..cf4a0b40 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "schema", + srcs = [ + "authcode.go", + "authrequest.go", + "client.go", + "connector.go", + "devicerequest.go", + "devicetoken.go", + "dialects.go", + "keys.go", + "offlinesession.go", + "password.go", + "refreshtoken.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/ent/schema", + importpath = "github.com/dexidp/dex/storage/ent/schema", + visibility = ["//visibility:public"], + deps = [ + "//vendor/entgo.io/ent", + "//vendor/entgo.io/ent/dialect", + "//vendor/entgo.io/ent/schema/field", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/authcode.go b/vendor/github.com/dexidp/dex/storage/ent/schema/authcode.go new file mode 100644 index 00000000..1574347b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/authcode.go @@ -0,0 +1,90 @@ 
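+// authcode.go mirrors dex's auth_code SQL table as an ent schema; the original
+// CREATE TABLE statement is kept in the comment below for reference.
+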
+package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +/* Original SQL table: +create table auth_code +( + id text not null primary key, + client_id text not null, + scopes blob not null, + nonce text not null, + redirect_uri text not null, + claims_user_id text not null, + claims_username text not null, + claims_email text not null, + claims_email_verified integer not null, + claims_groups blob not null, + connector_id text not null, + connector_data blob, + expiry timestamp not null, + claims_preferred_username text default '' not null, + code_challenge text default '' not null, + code_challenge_method text default '' not null +); +*/ + +// AuthCode holds the schema definition for the AuthCode entity. +type AuthCode struct { + ent.Schema +} + +// Fields of the AuthCode. +func (AuthCode) Fields() []ent.Field { + return []ent.Field{ + field.Text("id"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Text("client_id"). + SchemaType(textSchema). + NotEmpty(), + field.JSON("scopes", []string{}). + Optional(), + field.Text("nonce"). + SchemaType(textSchema). + NotEmpty(), + field.Text("redirect_uri"). + SchemaType(textSchema). + NotEmpty(), + + field.Text("claims_user_id"). + SchemaType(textSchema). + NotEmpty(), + field.Text("claims_username"). + SchemaType(textSchema). + NotEmpty(), + field.Text("claims_email"). + SchemaType(textSchema). + NotEmpty(), + field.Bool("claims_email_verified"), + field.JSON("claims_groups", []string{}). + Optional(), + field.Text("claims_preferred_username"). + SchemaType(textSchema). + Default(""), + + field.Text("connector_id"). + SchemaType(textSchema). + NotEmpty(), + field.Bytes("connector_data"). + Nillable(). + Optional(), + field.Time("expiry"). + SchemaType(timeSchema), + field.Text("code_challenge"). + SchemaType(textSchema). + Default(""), + field.Text("code_challenge_method"). + SchemaType(textSchema). + Default(""), + } +} + +// Edges of the AuthCode. +func (AuthCode) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/authrequest.go b/vendor/github.com/dexidp/dex/storage/ent/schema/authrequest.go new file mode 100644 index 00000000..2b75927b --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/authrequest.go @@ -0,0 +1,97 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +/* Original SQL table: +create table auth_request +( + id text not null primary key, + client_id text not null, + response_types blob not null, + scopes blob not null, + redirect_uri text not null, + nonce text not null, + state text not null, + force_approval_prompt integer not null, + logged_in integer not null, + claims_user_id text not null, + claims_username text not null, + claims_email text not null, + claims_email_verified integer not null, + claims_groups blob not null, + connector_id text not null, + connector_data blob, + expiry timestamp not null, + claims_preferred_username text default '' not null, + code_challenge text default '' not null, + code_challenge_method text default '' not null, + hmac_key blob +); +*/ + +// AuthRequest holds the schema definition for the AuthRequest entity. +type AuthRequest struct { + ent.Schema +} + +// Fields of the AuthRequest. +func (AuthRequest) Fields() []ent.Field { + return []ent.Field{ + field.Text("id"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Text("client_id"). + SchemaType(textSchema), + field.JSON("scopes", []string{}). 
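+			// Stored as a serialized JSON array; the original schema used a blob column.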
+			Optional(),
+		field.JSON("response_types", []string{}).
+			Optional(),
+		field.Text("redirect_uri").
+			SchemaType(textSchema),
+		field.Text("nonce").
+			SchemaType(textSchema),
+		field.Text("state").
+			SchemaType(textSchema),
+
+		field.Bool("force_approval_prompt"),
+		field.Bool("logged_in"),
+
+		field.Text("claims_user_id").
+			SchemaType(textSchema),
+		field.Text("claims_username").
+			SchemaType(textSchema),
+		field.Text("claims_email").
+			SchemaType(textSchema),
+		field.Bool("claims_email_verified"),
+		field.JSON("claims_groups", []string{}).
+			Optional(),
+		field.Text("claims_preferred_username").
+			SchemaType(textSchema).
+			Default(""),
+
+		field.Text("connector_id").
+			SchemaType(textSchema),
+		field.Bytes("connector_data").
+			Nillable().
+			Optional(),
+		field.Time("expiry").
+			SchemaType(timeSchema),
+
+		field.Text("code_challenge").
+			SchemaType(textSchema).
+			Default(""),
+		field.Text("code_challenge_method").
+			SchemaType(textSchema).
+			Default(""),
+		field.Bytes("hmac_key"),
+	}
+}
+
+// Edges of the AuthRequest.
+func (AuthRequest) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/client.go b/vendor/github.com/dexidp/dex/storage/ent/schema/client.go
new file mode 100644
index 00000000..b897c52a
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/client.go
@@ -0,0 +1,54 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+)
+
+/* Original SQL table:
+create table client
+(
+    id text not null primary key,
+    secret text not null,
+    redirect_uris blob not null,
+    trusted_peers blob not null,
+    public integer not null,
+    name text not null,
+    logo_url text not null
+);
+*/
+
+// OAuth2Client holds the schema definition for the Client entity.
+type OAuth2Client struct {
+	ent.Schema
+}
+
+// Fields of the OAuth2Client.
+func (OAuth2Client) Fields() []ent.Field {
+	return []ent.Field{
+		field.Text("id").
+			SchemaType(textSchema).
+			MaxLen(100).
+			NotEmpty().
+			Unique(),
+		field.Text("secret").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.JSON("redirect_uris", []string{}).
+			Optional(),
+		field.JSON("trusted_peers", []string{}).
+			Optional(),
+		field.Bool("public"),
+		field.Text("name").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("logo_url").
+			SchemaType(textSchema).
+			NotEmpty(),
+	}
+}
+
+// Edges of the OAuth2Client.
+func (OAuth2Client) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/connector.go b/vendor/github.com/dexidp/dex/storage/ent/schema/connector.go
new file mode 100644
index 00000000..41b65eb4
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/connector.go
@@ -0,0 +1,47 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+)
+
+/* Original SQL table:
+create table connector
+(
+    id text not null primary key,
+    type text not null,
+    name text not null,
+    resource_version text not null,
+    config blob
+);
+*/
+
+// Connector holds the schema definition for the Connector entity.
+type Connector struct {
+	ent.Schema
+}
+
+// Fields of the Connector.
+func (Connector) Fields() []ent.Field {
+	return []ent.Field{
+		field.Text("id").
+			SchemaType(textSchema).
+			MaxLen(100).
+			NotEmpty().
+			Unique(),
+		field.Text("type").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("name").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("resource_version").
+			SchemaType(textSchema),
+		field.Bytes("config"),
+	}
+}
+
+// Edges of the Connector.
+func (Connector) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/devicerequest.go b/vendor/github.com/dexidp/dex/storage/ent/schema/devicerequest.go new file mode 100644 index 00000000..00a61386 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/devicerequest.go @@ -0,0 +1,51 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +/* Original SQL table: +create table device_request +( + user_code text not null primary key, + device_code text not null, + client_id text not null, + client_secret text, + scopes blob not null, + expiry timestamp not null +); +*/ + +// DeviceRequest holds the schema definition for the DeviceRequest entity. +type DeviceRequest struct { + ent.Schema +} + +// Fields of the DeviceRequest. +func (DeviceRequest) Fields() []ent.Field { + return []ent.Field{ + field.Text("user_code"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Text("device_code"). + SchemaType(textSchema). + NotEmpty(), + field.Text("client_id"). + SchemaType(textSchema). + NotEmpty(), + field.Text("client_secret"). + SchemaType(textSchema). + NotEmpty(), + field.JSON("scopes", []string{}). + Optional(), + field.Time("expiry"). + SchemaType(timeSchema), + } +} + +// Edges of the DeviceRequest. +func (DeviceRequest) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/devicetoken.go b/vendor/github.com/dexidp/dex/storage/ent/schema/devicetoken.go new file mode 100644 index 00000000..dc0e7b8e --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/devicetoken.go @@ -0,0 +1,55 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +/* Original SQL table: +create table device_token +( + device_code text not null primary key, + status text not null, + token blob, + expiry timestamp not null, + last_request timestamp not null, + poll_interval integer not null, + code_challenge text default '' not null, + code_challenge_method text default '' not null +); +*/ + +// DeviceToken holds the schema definition for the DeviceToken entity. +type DeviceToken struct { + ent.Schema +} + +// Fields of the DeviceToken. +func (DeviceToken) Fields() []ent.Field { + return []ent.Field{ + field.Text("device_code"). + SchemaType(textSchema). + NotEmpty(). + Unique(), + field.Text("status"). + SchemaType(textSchema). + NotEmpty(), + field.Bytes("token").Nillable().Optional(), + field.Time("expiry"). + SchemaType(timeSchema), + field.Time("last_request"). + SchemaType(timeSchema), + field.Int("poll_interval"), + field.Text("code_challenge"). + SchemaType(textSchema). + Default(""), + field.Text("code_challenge_method"). + SchemaType(textSchema). + Default(""), + } +} + +// Edges of the DeviceToken. +func (DeviceToken) Edges() []ent.Edge { + return []ent.Edge{} +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/dialects.go b/vendor/github.com/dexidp/dex/storage/ent/schema/dialects.go new file mode 100644 index 00000000..2e5be8fb --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/schema/dialects.go @@ -0,0 +1,21 @@ +package schema + +import ( + "entgo.io/ent/dialect" +) + +var textSchema = map[string]string{ + dialect.Postgres: "text", + dialect.SQLite: "text", + // MySQL doesn't support indices on text fields w/o + // specifying key length. Use varchar instead (767 byte + // is the max key length for InnoDB with 4k pages). + // For compound indexes (with two keys) even less. 
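+	// varchar(384) presumably fits two four-byte utf8mb4 key parts
+	// (2 x 384 x 4 = 3072 bytes) under InnoDB's large-prefix limit.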
+	dialect.MySQL: "varchar(384)",
+}
+
+var timeSchema = map[string]string{
+	dialect.Postgres: "timestamptz",
+	dialect.SQLite:   "timestamp",
+	dialect.MySQL:    "datetime(3)",
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/keys.go b/vendor/github.com/dexidp/dex/storage/ent/schema/keys.go
new file mode 100644
index 00000000..ec5cd3f6
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/keys.go
@@ -0,0 +1,45 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+	"gopkg.in/square/go-jose.v2"
+
+	"github.com/dexidp/dex/storage"
+)
+
+/* Original SQL table:
+create table keys
+(
+    id text not null primary key,
+    verification_keys blob not null,
+    signing_key blob not null,
+    signing_key_pub blob not null,
+    next_rotation timestamp not null
+);
+*/
+
+// Keys holds the schema definition for the Keys entity.
+type Keys struct {
+	ent.Schema
+}
+
+// Fields of the Keys.
+func (Keys) Fields() []ent.Field {
+	return []ent.Field{
+		field.Text("id").
+			SchemaType(textSchema).
+			NotEmpty().
+			Unique(),
+		field.JSON("verification_keys", []storage.VerificationKey{}),
+		field.JSON("signing_key", jose.JSONWebKey{}),
+		field.JSON("signing_key_pub", jose.JSONWebKey{}),
+		field.Time("next_rotation").
+			SchemaType(timeSchema),
+	}
+}
+
+// Edges of the Keys.
+func (Keys) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/offlinesession.go b/vendor/github.com/dexidp/dex/storage/ent/schema/offlinesession.go
new file mode 100644
index 00000000..e9a166c3
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/offlinesession.go
@@ -0,0 +1,46 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+)
+
+/* Original SQL table:
+create table offline_session
+(
+    user_id text not null,
+    conn_id text not null,
+    refresh blob not null,
+    connector_data blob,
+    primary key (user_id, conn_id)
+);
+*/
+
+// OfflineSession holds the schema definition for the OfflineSession entity.
+type OfflineSession struct {
+	ent.Schema
+}
+
+// Fields of the OfflineSession.
+func (OfflineSession) Fields() []ent.Field {
+	return []ent.Field{
+		// A synthetic id field is used because multi-column primary keys are not supported yet.
+		field.Text("id").
+			SchemaType(textSchema).
+			NotEmpty().
+			Unique(),
+		field.Text("user_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("conn_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Bytes("refresh"),
+		field.Bytes("connector_data").Nillable().Optional(),
+	}
+}
+
+// Edges of the OfflineSession.
+func (OfflineSession) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/password.go b/vendor/github.com/dexidp/dex/storage/ent/schema/password.go
new file mode 100644
index 00000000..cbc72fc5
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/password.go
@@ -0,0 +1,44 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+)
+
+/* Original SQL table:
+create table password
+(
+    email text not null primary key,
+    hash blob not null,
+    username text not null,
+    user_id text not null
+);
+*/
+
+// Password holds the schema definition for the Password entity.
+type Password struct {
+	ent.Schema
+}
+
+// Fields of the Password.
+func (Password) Fields() []ent.Field {
+	return []ent.Field{
+		field.Text("email").
+			SchemaType(textSchema).
+			StorageKey("email"). // use email as ID field to make querying easier
+			NotEmpty().
+			Unique(),
+		field.Bytes("hash"),
+		field.Text("username").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("user_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+	}
+}
+
+// Edges of the Password.
+func (Password) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/schema/refreshtoken.go b/vendor/github.com/dexidp/dex/storage/ent/schema/refreshtoken.go
new file mode 100644
index 00000000..86e61d52
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/schema/refreshtoken.go
@@ -0,0 +1,95 @@
+package schema
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/schema/field"
+)
+
+/* Original SQL table:
+create table refresh_token
+(
+    id text not null primary key,
+    client_id text not null,
+    scopes blob not null,
+    nonce text not null,
+    claims_user_id text not null,
+    claims_username text not null,
+    claims_email text not null,
+    claims_email_verified integer not null,
+    claims_groups blob not null,
+    connector_id text not null,
+    connector_data blob,
+    token text default '' not null,
+    created_at timestamp default '0001-01-01 00:00:00 UTC' not null,
+    last_used timestamp default '0001-01-01 00:00:00 UTC' not null,
+    claims_preferred_username text default '' not null,
+    obsolete_token text default ''
+);
+*/
+
+// RefreshToken holds the schema definition for the RefreshToken entity.
+type RefreshToken struct {
+	ent.Schema
+}
+
+// Fields of the RefreshToken.
+func (RefreshToken) Fields() []ent.Field {
+	return []ent.Field{
+		field.Text("id").
+			SchemaType(textSchema).
+			NotEmpty().
+			Unique(),
+		field.Text("client_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.JSON("scopes", []string{}).
+			Optional(),
+		field.Text("nonce").
+			SchemaType(textSchema).
+			NotEmpty(),
+
+		field.Text("claims_user_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("claims_username").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Text("claims_email").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Bool("claims_email_verified"),
+		field.JSON("claims_groups", []string{}).
+			Optional(),
+		field.Text("claims_preferred_username").
+			SchemaType(textSchema).
+			Default(""),
+
+		field.Text("connector_id").
+			SchemaType(textSchema).
+			NotEmpty(),
+		field.Bytes("connector_data").
+			Nillable().
+			Optional(),
+
+		field.Text("token").
+			SchemaType(textSchema).
+			Default(""),
+		field.Text("obsolete_token").
+			SchemaType(textSchema).
+			Default(""),
+
+		field.Time("created_at").
+			SchemaType(timeSchema).
+			Default(time.Now),
+		field.Time("last_used").
+			SchemaType(timeSchema).
+			Default(time.Now),
+	}
+}
+
+// Edges of the RefreshToken.
+func (RefreshToken) Edges() []ent.Edge {
+	return []ent.Edge{}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/ent/sqlite.go b/vendor/github.com/dexidp/dex/storage/ent/sqlite.go
new file mode 100644
index 00000000..c0b442f4
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/ent/sqlite.go
@@ -0,0 +1,61 @@
+package ent
+
+import (
+	"context"
+	"crypto/sha256"
+	"strings"
+
+	"entgo.io/ent/dialect/sql"
+	_ "github.com/mattn/go-sqlite3" // Register sqlite driver.
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/ent/client"
+	"github.com/dexidp/dex/storage/ent/db"
+)
+
+// SQLite3 options for creating an SQL db.
+type SQLite3 struct {
+	File string `json:"file"`
+}
+
+// Open always returns a new sqlite3 storage.
+func (s *SQLite3) Open(logger log.Logger) (storage.Storage, error) { + logger.Debug("experimental ent-based storage driver is enabled") + + // Implicitly set foreign_keys pragma to "on" because it is required by ent + s.File = addFK(s.File) + + drv, err := sql.Open("sqlite3", s.File) + if err != nil { + return nil, err + } + + // always allow only one connection to sqlite3, any other thread/go-routine + // attempting concurrent access will have to wait + pool := drv.DB() + pool.SetMaxOpenConns(1) + + databaseClient := client.NewDatabase( + client.WithClient(db.NewClient(db.Driver(drv))), + client.WithHasher(sha256.New), + ) + + if err := databaseClient.Schema().Create(context.TODO()); err != nil { + return nil, err + } + + return databaseClient, nil +} + +func addFK(dsn string) string { + if strings.Contains(dsn, "_fk") { + return dsn + } + + delim := "?" + if strings.Contains(dsn, "?") { + delim = "&" + } + return dsn + delim + "_fk=1" +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/types.go b/vendor/github.com/dexidp/dex/storage/ent/types.go new file mode 100644 index 00000000..062f8640 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/types.go @@ -0,0 +1,25 @@ +package ent + +// NetworkDB contains options common to SQL databases accessed over network. +type NetworkDB struct { + Database string + User string + Password string + Host string + Port uint16 + + ConnectionTimeout int // Seconds + + MaxOpenConns int // default: 5 + MaxIdleConns int // default: 5 + ConnMaxLifetime int // Seconds, default: not set +} + +// SSL represents SSL options for network databases. +type SSL struct { + Mode string + CAFile string + // Files for client auth. + KeyFile string + CertFile string +} diff --git a/vendor/github.com/dexidp/dex/storage/ent/utils.go b/vendor/github.com/dexidp/dex/storage/ent/utils.go new file mode 100644 index 00000000..6f51e065 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/ent/utils.go @@ -0,0 +1,10 @@ +package ent + +import "os" + +func getenv(key, defaultVal string) string { + if val := os.Getenv(key); val != "" { + return val + } + return defaultVal +} diff --git a/vendor/github.com/dexidp/dex/storage/etcd/BUILD b/vendor/github.com/dexidp/dex/storage/etcd/BUILD new file mode 100644 index 00000000..b4e2b63d --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/etcd/BUILD @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "etcd", + srcs = [ + "config.go", + "etcd.go", + "types.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/etcd", + importpath = "github.com/dexidp/dex/storage/etcd", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/go.etcd.io/etcd/client/pkg/v3/transport", + "//vendor/go.etcd.io/etcd/client/v3:client", + "//vendor/go.etcd.io/etcd/client/v3/namespace", + "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/etcd/config.go b/vendor/github.com/dexidp/dex/storage/etcd/config.go new file mode 100644 index 00000000..7f1a7b4f --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/etcd/config.go @@ -0,0 +1,91 @@ +package etcd + +import ( + "time" + + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/namespace" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" +) + +var defaultDialTimeout = 2 * time.Second + +// SSL represents 
SSL options for etcd databases. +type SSL struct { + ServerName string `json:"serverName" yaml:"serverName"` + CAFile string `json:"caFile" yaml:"caFile"` + KeyFile string `json:"keyFile" yaml:"keyFile"` + CertFile string `json:"certFile" yaml:"certFile"` +} + +// Etcd options for connecting to etcd databases. +// If you are using a shared etcd cluster for storage, it might be useful to +// configure an etcd namespace either via Namespace field or using `etcd grpc-proxy +// --namespace=` +type Etcd struct { + Endpoints []string `json:"endpoints" yaml:"endpoints"` + Namespace string `json:"namespace" yaml:"namespace"` + Username string `json:"username" yaml:"username"` + Password string `json:"password" yaml:"password"` + SSL SSL `json:"ssl" yaml:"ssl"` +} + +// Open creates a new storage implementation backed by Etcd +func (p *Etcd) Open(logger log.Logger) (storage.Storage, error) { + return p.open(logger) +} + +func (p *Etcd) open(logger log.Logger) (*conn, error) { + cfg := clientv3.Config{ + Endpoints: p.Endpoints, + DialTimeout: defaultDialTimeout, + Username: p.Username, + Password: p.Password, + } + + var cfgtls *transport.TLSInfo + tlsinfo := transport.TLSInfo{} + if p.SSL.CertFile != "" { + tlsinfo.CertFile = p.SSL.CertFile + cfgtls = &tlsinfo + } + + if p.SSL.KeyFile != "" { + tlsinfo.KeyFile = p.SSL.KeyFile + cfgtls = &tlsinfo + } + + if p.SSL.CAFile != "" { + tlsinfo.TrustedCAFile = p.SSL.CAFile + cfgtls = &tlsinfo + } + + if p.SSL.ServerName != "" { + tlsinfo.ServerName = p.SSL.ServerName + cfgtls = &tlsinfo + } + + if cfgtls != nil { + clientTLS, err := cfgtls.ClientConfig() + if err != nil { + return nil, err + } + cfg.TLS = clientTLS + } + + db, err := clientv3.New(cfg) + if err != nil { + return nil, err + } + if len(p.Namespace) > 0 { + db.KV = namespace.NewKV(db.KV, p.Namespace) + } + c := &conn{ + db: db, + logger: logger, + } + return c, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/etcd/etcd.go b/vendor/github.com/dexidp/dex/storage/etcd/etcd.go new file mode 100644 index 00000000..13e815ec --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/etcd/etcd.go @@ -0,0 +1,646 @@ +package etcd + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" +) + +const ( + clientPrefix = "client/" + authCodePrefix = "auth_code/" + refreshTokenPrefix = "refresh_token/" + authRequestPrefix = "auth_req/" + passwordPrefix = "password/" + offlineSessionPrefix = "offline_session/" + connectorPrefix = "connector/" + keysName = "openid-connect-keys" + deviceRequestPrefix = "device_req/" + deviceTokenPrefix = "device_token/" + + // defaultStorageTimeout will be applied to all storage's operations. 
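+	// Every conn method below derives a context.WithTimeout from it.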
+ defaultStorageTimeout = 5 * time.Second +) + +type conn struct { + db *clientv3.Client + logger log.Logger +} + +func (c *conn) Close() error { + return c.db.Close() +} + +func (c *conn) GarbageCollect(now time.Time) (result storage.GCResult, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + authRequests, err := c.listAuthRequests(ctx) + if err != nil { + return result, err + } + + var delErr error + for _, authRequest := range authRequests { + if now.After(authRequest.Expiry) { + if err := c.deleteKey(ctx, keyID(authRequestPrefix, authRequest.ID)); err != nil { + c.logger.Errorf("failed to delete auth request: %v", err) + delErr = fmt.Errorf("failed to delete auth request: %v", err) + } + result.AuthRequests++ + } + } + if delErr != nil { + return result, delErr + } + + authCodes, err := c.listAuthCodes(ctx) + if err != nil { + return result, err + } + + for _, authCode := range authCodes { + if now.After(authCode.Expiry) { + if err := c.deleteKey(ctx, keyID(authCodePrefix, authCode.ID)); err != nil { + c.logger.Errorf("failed to delete auth code %v", err) + delErr = fmt.Errorf("failed to delete auth code: %v", err) + } + result.AuthCodes++ + } + } + + deviceRequests, err := c.listDeviceRequests(ctx) + if err != nil { + return result, err + } + + for _, deviceRequest := range deviceRequests { + if now.After(deviceRequest.Expiry) { + if err := c.deleteKey(ctx, keyID(deviceRequestPrefix, deviceRequest.UserCode)); err != nil { + c.logger.Errorf("failed to delete device request %v", err) + delErr = fmt.Errorf("failed to delete device request: %v", err) + } + result.DeviceRequests++ + } + } + + deviceTokens, err := c.listDeviceTokens(ctx) + if err != nil { + return result, err + } + + for _, deviceToken := range deviceTokens { + if now.After(deviceToken.Expiry) { + if err := c.deleteKey(ctx, keyID(deviceTokenPrefix, deviceToken.DeviceCode)); err != nil { + c.logger.Errorf("failed to delete device token %v", err) + delErr = fmt.Errorf("failed to delete device token: %v", err) + } + result.DeviceTokens++ + } + } + return result, delErr +} + +func (c *conn) CreateAuthRequest(a storage.AuthRequest) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(authRequestPrefix, a.ID), fromStorageAuthRequest(a)) +} + +func (c *conn) GetAuthRequest(id string) (a storage.AuthRequest, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + var req AuthRequest + if err = c.getKey(ctx, keyID(authRequestPrefix, id), &req); err != nil { + return + } + return toStorageAuthRequest(req), nil +} + +func (c *conn) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyID(authRequestPrefix, id), func(currentValue []byte) ([]byte, error) { + var current AuthRequest + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageAuthRequest(current)) + if err != nil { + return nil, err + } + return json.Marshal(fromStorageAuthRequest(updated)) + }) +} + +func (c *conn) DeleteAuthRequest(id string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyID(authRequestPrefix, id)) +} 
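+
+// The remaining CRUD methods follow the same shape: derive a short-lived
+// context, then delegate to the generic key and transaction helpers defined
+// later in this file.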
+ +func (c *conn) CreateAuthCode(a storage.AuthCode) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(authCodePrefix, a.ID), fromStorageAuthCode(a)) +} + +func (c *conn) GetAuthCode(id string) (a storage.AuthCode, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + var ac AuthCode + err = c.getKey(ctx, keyID(authCodePrefix, id), &ac) + if err == nil { + a = toStorageAuthCode(ac) + } + return a, err +} + +func (c *conn) DeleteAuthCode(id string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyID(authCodePrefix, id)) +} + +func (c *conn) CreateRefresh(r storage.RefreshToken) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(refreshTokenPrefix, r.ID), fromStorageRefreshToken(r)) +} + +func (c *conn) GetRefresh(id string) (r storage.RefreshToken, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + var token RefreshToken + if err = c.getKey(ctx, keyID(refreshTokenPrefix, id), &token); err != nil { + return + } + return toStorageRefreshToken(token), nil +} + +func (c *conn) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyID(refreshTokenPrefix, id), func(currentValue []byte) ([]byte, error) { + var current RefreshToken + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageRefreshToken(current)) + if err != nil { + return nil, err + } + return json.Marshal(fromStorageRefreshToken(updated)) + }) +} + +func (c *conn) DeleteRefresh(id string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyID(refreshTokenPrefix, id)) +} + +func (c *conn) ListRefreshTokens() (tokens []storage.RefreshToken, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, refreshTokenPrefix, clientv3.WithPrefix()) + if err != nil { + return tokens, err + } + for _, v := range res.Kvs { + var token RefreshToken + if err = json.Unmarshal(v.Value, &token); err != nil { + return tokens, err + } + tokens = append(tokens, toStorageRefreshToken(token)) + } + return tokens, nil +} + +func (c *conn) CreateClient(cli storage.Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(clientPrefix, cli.ID), cli) +} + +func (c *conn) GetClient(id string) (cli storage.Client, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + err = c.getKey(ctx, keyID(clientPrefix, id), &cli) + return cli, err +} + +func (c *conn) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyID(clientPrefix, id), func(currentValue []byte) ([]byte, error) { + var current storage.Client + if len(currentValue) > 0 { + if err := 
json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(current) + if err != nil { + return nil, err + } + return json.Marshal(updated) + }) +} + +func (c *conn) DeleteClient(id string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyID(clientPrefix, id)) +} + +func (c *conn) ListClients() (clients []storage.Client, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, clientPrefix, clientv3.WithPrefix()) + if err != nil { + return clients, err + } + for _, v := range res.Kvs { + var cli storage.Client + if err = json.Unmarshal(v.Value, &cli); err != nil { + return clients, err + } + clients = append(clients, cli) + } + return clients, nil +} + +func (c *conn) CreatePassword(p storage.Password) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, passwordPrefix+strings.ToLower(p.Email), p) +} + +func (c *conn) GetPassword(email string) (p storage.Password, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + err = c.getKey(ctx, keyEmail(passwordPrefix, email), &p) + return p, err +} + +func (c *conn) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyEmail(passwordPrefix, email), func(currentValue []byte) ([]byte, error) { + var current storage.Password + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(current) + if err != nil { + return nil, err + } + return json.Marshal(updated) + }) +} + +func (c *conn) DeletePassword(email string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyEmail(passwordPrefix, email)) +} + +func (c *conn) ListPasswords() (passwords []storage.Password, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, passwordPrefix, clientv3.WithPrefix()) + if err != nil { + return passwords, err + } + for _, v := range res.Kvs { + var p storage.Password + if err = json.Unmarshal(v.Value, &p); err != nil { + return passwords, err + } + passwords = append(passwords, p) + } + return passwords, nil +} + +func (c *conn) CreateOfflineSessions(s storage.OfflineSessions) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keySession(s.UserID, s.ConnID), fromStorageOfflineSessions(s)) +} + +func (c *conn) UpdateOfflineSessions(userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keySession(userID, connID), func(currentValue []byte) ([]byte, error) { + var current OfflineSessions + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageOfflineSessions(current)) + if err != nil { + return nil, err + } + return 
json.Marshal(fromStorageOfflineSessions(updated)) + }) +} + +func (c *conn) GetOfflineSessions(userID string, connID string) (s storage.OfflineSessions, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + var os OfflineSessions + if err = c.getKey(ctx, keySession(userID, connID), &os); err != nil { + return + } + return toStorageOfflineSessions(os), nil +} + +func (c *conn) DeleteOfflineSessions(userID string, connID string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keySession(userID, connID)) +} + +func (c *conn) CreateConnector(connector storage.Connector) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(connectorPrefix, connector.ID), connector) +} + +func (c *conn) GetConnector(id string) (conn storage.Connector, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + err = c.getKey(ctx, keyID(connectorPrefix, id), &conn) + return conn, err +} + +func (c *conn) UpdateConnector(id string, updater func(s storage.Connector) (storage.Connector, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyID(connectorPrefix, id), func(currentValue []byte) ([]byte, error) { + var current storage.Connector + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(current) + if err != nil { + return nil, err + } + return json.Marshal(updated) + }) +} + +func (c *conn) DeleteConnector(id string) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.deleteKey(ctx, keyID(connectorPrefix, id)) +} + +func (c *conn) ListConnectors() (connectors []storage.Connector, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, connectorPrefix, clientv3.WithPrefix()) + if err != nil { + return nil, err + } + for _, v := range res.Kvs { + var c storage.Connector + if err = json.Unmarshal(v.Value, &c); err != nil { + return nil, err + } + connectors = append(connectors, c) + } + return connectors, nil +} + +func (c *conn) GetKeys() (keys storage.Keys, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + res, err := c.db.Get(ctx, keysName) + if err != nil { + return keys, err + } + if res.Count > 0 && len(res.Kvs) > 0 { + err = json.Unmarshal(res.Kvs[0].Value, &keys) + } + return keys, err +} + +func (c *conn) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keysName, func(currentValue []byte) ([]byte, error) { + var current storage.Keys + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(current) + if err != nil { + return nil, err + } + return json.Marshal(updated) + }) +} + +func (c *conn) deleteKey(ctx context.Context, key string) error { + res, err := c.db.Delete(ctx, key) + if err != nil { + return err + } + if res.Deleted == 0 { + return storage.ErrNotFound + } + return nil +} + +func (c *conn) getKey(ctx 
context.Context, key string, value interface{}) error { + r, err := c.db.Get(ctx, key) + if err != nil { + return err + } + if r.Count == 0 { + return storage.ErrNotFound + } + return json.Unmarshal(r.Kvs[0].Value, value) +} + +func (c *conn) listAuthRequests(ctx context.Context) (reqs []AuthRequest, err error) { + res, err := c.db.Get(ctx, authRequestPrefix, clientv3.WithPrefix()) + if err != nil { + return reqs, err + } + for _, v := range res.Kvs { + var r AuthRequest + if err = json.Unmarshal(v.Value, &r); err != nil { + return reqs, err + } + reqs = append(reqs, r) + } + return reqs, nil +} + +func (c *conn) listAuthCodes(ctx context.Context) (codes []AuthCode, err error) { + res, err := c.db.Get(ctx, authCodePrefix, clientv3.WithPrefix()) + if err != nil { + return codes, err + } + for _, v := range res.Kvs { + var c AuthCode + if err = json.Unmarshal(v.Value, &c); err != nil { + return codes, err + } + codes = append(codes, c) + } + return codes, nil +} + +func (c *conn) txnCreate(ctx context.Context, key string, value interface{}) error { + b, err := json.Marshal(value) + if err != nil { + return err + } + txn := c.db.Txn(ctx) + res, err := txn. + If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). + Then(clientv3.OpPut(key, string(b))). + Commit() + if err != nil { + return err + } + if !res.Succeeded { + return storage.ErrAlreadyExists + } + return nil +} + +func (c *conn) txnUpdate(ctx context.Context, key string, update func(current []byte) ([]byte, error)) error { + getResp, err := c.db.Get(ctx, key) + if err != nil { + return err + } + var currentValue []byte + var modRev int64 + if len(getResp.Kvs) > 0 { + currentValue = getResp.Kvs[0].Value + modRev = getResp.Kvs[0].ModRevision + } + + updatedValue, err := update(currentValue) + if err != nil { + return err + } + + txn := c.db.Txn(ctx) + updateResp, err := txn. + If(clientv3.Compare(clientv3.ModRevision(key), "=", modRev)). + Then(clientv3.OpPut(key, string(updatedValue))). 
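+		// The ModRevision guard turns this into a compare-and-swap: the put is
+		// applied only if the key is unchanged since it was read above.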
+ Commit() + if err != nil { + return err + } + if !updateResp.Succeeded { + return fmt.Errorf("failed to update key=%q: concurrent conflicting update happened", key) + } + return nil +} + +func keyID(prefix, id string) string { return prefix + id } +func keyEmail(prefix, email string) string { return prefix + strings.ToLower(email) } +func keySession(userID, connID string) string { + return offlineSessionPrefix + strings.ToLower(userID+"|"+connID) +} + +func (c *conn) CreateDeviceRequest(d storage.DeviceRequest) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(deviceRequestPrefix, d.UserCode), fromStorageDeviceRequest(d)) +} + +func (c *conn) GetDeviceRequest(userCode string) (r storage.DeviceRequest, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + err = c.getKey(ctx, keyID(deviceRequestPrefix, userCode), &r) + return r, err +} + +func (c *conn) listDeviceRequests(ctx context.Context) (requests []DeviceRequest, err error) { + res, err := c.db.Get(ctx, deviceRequestPrefix, clientv3.WithPrefix()) + if err != nil { + return requests, err + } + for _, v := range res.Kvs { + var r DeviceRequest + if err = json.Unmarshal(v.Value, &r); err != nil { + return requests, err + } + requests = append(requests, r) + } + return requests, nil +} + +func (c *conn) CreateDeviceToken(t storage.DeviceToken) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnCreate(ctx, keyID(deviceTokenPrefix, t.DeviceCode), fromStorageDeviceToken(t)) +} + +func (c *conn) GetDeviceToken(deviceCode string) (t storage.DeviceToken, err error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + var dt DeviceToken + if err = c.getKey(ctx, keyID(deviceTokenPrefix, deviceCode), &dt); err == nil { + t = toStorageDeviceToken(dt) + } + return +} + +func (c *conn) listDeviceTokens(ctx context.Context) (deviceTokens []DeviceToken, err error) { + res, err := c.db.Get(ctx, deviceTokenPrefix, clientv3.WithPrefix()) + if err != nil { + return deviceTokens, err + } + for _, v := range res.Kvs { + var dt DeviceToken + if err = json.Unmarshal(v.Value, &dt); err != nil { + return deviceTokens, err + } + deviceTokens = append(deviceTokens, dt) + } + return deviceTokens, nil +} + +func (c *conn) UpdateDeviceToken(deviceCode string, updater func(old storage.DeviceToken) (storage.DeviceToken, error)) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) + defer cancel() + return c.txnUpdate(ctx, keyID(deviceTokenPrefix, deviceCode), func(currentValue []byte) ([]byte, error) { + var current DeviceToken + if len(currentValue) > 0 { + if err := json.Unmarshal(currentValue, ¤t); err != nil { + return nil, err + } + } + updated, err := updater(toStorageDeviceToken(current)) + if err != nil { + return nil, err + } + return json.Marshal(fromStorageDeviceToken(updated)) + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/etcd/types.go b/vendor/github.com/dexidp/dex/storage/etcd/types.go new file mode 100644 index 00000000..91199ab6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/etcd/types.go @@ -0,0 +1,318 @@ +package etcd + +import ( + "time" + + jose "gopkg.in/square/go-jose.v2" + + "github.com/dexidp/dex/storage" +) + +// AuthCode is a mirrored struct from storage with JSON struct tags +type AuthCode struct { + ID string 
`json:"ID"` + ClientID string `json:"clientID"` + RedirectURI string `json:"redirectURI"` + Nonce string `json:"nonce,omitempty"` + Scopes []string `json:"scopes,omitempty"` + + ConnectorID string `json:"connectorID,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` + Claims Claims `json:"claims,omitempty"` + + Expiry time.Time `json:"expiry"` + + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` +} + +func toStorageAuthCode(a AuthCode) storage.AuthCode { + return storage.AuthCode{ + ID: a.ID, + ClientID: a.ClientID, + RedirectURI: a.RedirectURI, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Nonce: a.Nonce, + Scopes: a.Scopes, + Claims: toStorageClaims(a.Claims), + Expiry: a.Expiry, + PKCE: storage.PKCE{ + CodeChallenge: a.CodeChallenge, + CodeChallengeMethod: a.CodeChallengeMethod, + }, + } +} + +func fromStorageAuthCode(a storage.AuthCode) AuthCode { + return AuthCode{ + ID: a.ID, + ClientID: a.ClientID, + RedirectURI: a.RedirectURI, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Nonce: a.Nonce, + Scopes: a.Scopes, + Claims: fromStorageClaims(a.Claims), + Expiry: a.Expiry, + CodeChallenge: a.PKCE.CodeChallenge, + CodeChallengeMethod: a.PKCE.CodeChallengeMethod, + } +} + +// AuthRequest is a mirrored struct from storage with JSON struct tags +type AuthRequest struct { + ID string `json:"id"` + ClientID string `json:"client_id"` + + ResponseTypes []string `json:"response_types"` + Scopes []string `json:"scopes"` + RedirectURI string `json:"redirect_uri"` + Nonce string `json:"nonce"` + State string `json:"state"` + + ForceApprovalPrompt bool `json:"force_approval_prompt"` + + Expiry time.Time `json:"expiry"` + + LoggedIn bool `json:"logged_in"` + + Claims Claims `json:"claims"` + + ConnectorID string `json:"connector_id"` + ConnectorData []byte `json:"connector_data"` + + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` + + HMACKey []byte `json:"hmac_key"` +} + +func fromStorageAuthRequest(a storage.AuthRequest) AuthRequest { + return AuthRequest{ + ID: a.ID, + ClientID: a.ClientID, + ResponseTypes: a.ResponseTypes, + Scopes: a.Scopes, + RedirectURI: a.RedirectURI, + Nonce: a.Nonce, + State: a.State, + ForceApprovalPrompt: a.ForceApprovalPrompt, + Expiry: a.Expiry, + LoggedIn: a.LoggedIn, + Claims: fromStorageClaims(a.Claims), + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + CodeChallenge: a.PKCE.CodeChallenge, + CodeChallengeMethod: a.PKCE.CodeChallengeMethod, + HMACKey: a.HMACKey, + } +} + +func toStorageAuthRequest(a AuthRequest) storage.AuthRequest { + return storage.AuthRequest{ + ID: a.ID, + ClientID: a.ClientID, + ResponseTypes: a.ResponseTypes, + Scopes: a.Scopes, + RedirectURI: a.RedirectURI, + Nonce: a.Nonce, + State: a.State, + ForceApprovalPrompt: a.ForceApprovalPrompt, + LoggedIn: a.LoggedIn, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Expiry: a.Expiry, + Claims: toStorageClaims(a.Claims), + PKCE: storage.PKCE{ + CodeChallenge: a.CodeChallenge, + CodeChallengeMethod: a.CodeChallengeMethod, + }, + HMACKey: a.HMACKey, + } +} + +// RefreshToken is a mirrored struct from storage with JSON struct tags +type RefreshToken struct { + ID string `json:"id"` + + Token string `json:"token"` + ObsoleteToken string `json:"obsolete_token"` + + CreatedAt time.Time `json:"created_at"` + LastUsed time.Time `json:"last_used"` + + ClientID string 
`json:"client_id"` + + ConnectorID string `json:"connector_id"` + ConnectorData []byte `json:"connector_data"` + Claims Claims `json:"claims"` + + Scopes []string `json:"scopes"` + + Nonce string `json:"nonce"` +} + +func toStorageRefreshToken(r RefreshToken) storage.RefreshToken { + return storage.RefreshToken{ + ID: r.ID, + Token: r.Token, + ObsoleteToken: r.ObsoleteToken, + CreatedAt: r.CreatedAt, + LastUsed: r.LastUsed, + ClientID: r.ClientID, + ConnectorID: r.ConnectorID, + ConnectorData: r.ConnectorData, + Scopes: r.Scopes, + Nonce: r.Nonce, + Claims: toStorageClaims(r.Claims), + } +} + +func fromStorageRefreshToken(r storage.RefreshToken) RefreshToken { + return RefreshToken{ + ID: r.ID, + Token: r.Token, + ObsoleteToken: r.ObsoleteToken, + CreatedAt: r.CreatedAt, + LastUsed: r.LastUsed, + ClientID: r.ClientID, + ConnectorID: r.ConnectorID, + ConnectorData: r.ConnectorData, + Scopes: r.Scopes, + Nonce: r.Nonce, + Claims: fromStorageClaims(r.Claims), + } +} + +// Claims is a mirrored struct from storage with JSON struct tags. +type Claims struct { + UserID string `json:"userID"` + Username string `json:"username"` + PreferredUsername string `json:"preferredUsername"` + Email string `json:"email"` + EmailVerified bool `json:"emailVerified"` + Groups []string `json:"groups,omitempty"` +} + +func fromStorageClaims(i storage.Claims) Claims { + return Claims{ + UserID: i.UserID, + Username: i.Username, + PreferredUsername: i.PreferredUsername, + Email: i.Email, + EmailVerified: i.EmailVerified, + Groups: i.Groups, + } +} + +func toStorageClaims(i Claims) storage.Claims { + return storage.Claims{ + UserID: i.UserID, + Username: i.Username, + PreferredUsername: i.PreferredUsername, + Email: i.Email, + EmailVerified: i.EmailVerified, + Groups: i.Groups, + } +} + +// Keys is a mirrored struct from storage with JSON struct tags +type Keys struct { + SigningKey *jose.JSONWebKey `json:"signing_key,omitempty"` + SigningKeyPub *jose.JSONWebKey `json:"signing_key_pub,omitempty"` + VerificationKeys []storage.VerificationKey `json:"verification_keys"` + NextRotation time.Time `json:"next_rotation"` +} + +// OfflineSessions is a mirrored struct from storage with JSON struct tags +type OfflineSessions struct { + UserID string `json:"user_id,omitempty"` + ConnID string `json:"conn_id,omitempty"` + Refresh map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` +} + +func fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions { + return OfflineSessions{ + UserID: o.UserID, + ConnID: o.ConnID, + Refresh: o.Refresh, + ConnectorData: o.ConnectorData, + } +} + +func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions { + s := storage.OfflineSessions{ + UserID: o.UserID, + ConnID: o.ConnID, + Refresh: o.Refresh, + ConnectorData: o.ConnectorData, + } + if s.Refresh == nil { + // Server code assumes this will be non-nil. 
+ s.Refresh = make(map[string]*storage.RefreshTokenRef) + } + return s +} + +// DeviceRequest is a mirrored struct from storage with JSON struct tags +type DeviceRequest struct { + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Scopes []string `json:"scopes"` + Expiry time.Time `json:"expiry"` +} + +func fromStorageDeviceRequest(d storage.DeviceRequest) DeviceRequest { + return DeviceRequest{ + UserCode: d.UserCode, + DeviceCode: d.DeviceCode, + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: d.Scopes, + Expiry: d.Expiry, + } +} + +// DeviceToken is a mirrored struct from storage with JSON struct tags +type DeviceToken struct { + DeviceCode string `json:"device_code"` + Status string `json:"status"` + Token string `json:"token"` + Expiry time.Time `json:"expiry"` + LastRequestTime time.Time `json:"last_request"` + PollIntervalSeconds int `json:"poll_interval"` + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` +} + +func fromStorageDeviceToken(t storage.DeviceToken) DeviceToken { + return DeviceToken{ + DeviceCode: t.DeviceCode, + Status: t.Status, + Token: t.Token, + Expiry: t.Expiry, + LastRequestTime: t.LastRequestTime, + PollIntervalSeconds: t.PollIntervalSeconds, + CodeChallenge: t.PKCE.CodeChallenge, + CodeChallengeMethod: t.PKCE.CodeChallengeMethod, + } +} + +func toStorageDeviceToken(t DeviceToken) storage.DeviceToken { + return storage.DeviceToken{ + DeviceCode: t.DeviceCode, + Status: t.Status, + Token: t.Token, + Expiry: t.Expiry, + LastRequestTime: t.LastRequestTime, + PollIntervalSeconds: t.PollIntervalSeconds, + PKCE: storage.PKCE{ + CodeChallenge: t.CodeChallenge, + CodeChallengeMethod: t.CodeChallengeMethod, + }, + } +} diff --git a/vendor/github.com/dexidp/dex/storage/health.go b/vendor/github.com/dexidp/dex/storage/health.go new file mode 100644 index 00000000..1b6e22c6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/health.go @@ -0,0 +1,32 @@ +package storage + +import ( + "context" + "crypto" + "fmt" + "time" +) + +// NewCustomHealthCheckFunc returns a new health check function. +func NewCustomHealthCheckFunc(s Storage, now func() time.Time) func(context.Context) (details interface{}, err error) { + return func(_ context.Context) (details interface{}, err error) { + a := AuthRequest{ + ID: NewID(), + ClientID: NewID(), + + // Set a short expiry so if the delete fails this will be cleaned up quickly by garbage collection. 
+			Expiry:  now().Add(time.Minute),
+			HMACKey: NewHMACKey(crypto.SHA256),
+		}
+
+		if err := s.CreateAuthRequest(a); err != nil {
+			return nil, fmt.Errorf("create auth request: %v", err)
+		}
+
+		if err := s.DeleteAuthRequest(a.ID); err != nil {
+			return nil, fmt.Errorf("delete auth request: %v", err)
+		}
+
+		return nil, nil
+	}
+}
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/BUILD b/vendor/github.com/dexidp/dex/storage/kubernetes/BUILD
new file mode 100644
index 00000000..e51c7a72
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/BUILD
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "kubernetes",
+    srcs = [
+        "client.go",
+        "doc.go",
+        "lock.go",
+        "storage.go",
+        "transport.go",
+        "types.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/kubernetes",
+    importpath = "github.com/dexidp/dex/storage/kubernetes",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/Masterminds/semver",
+        "//vendor/github.com/dexidp/dex/pkg/log",
+        "//vendor/github.com/dexidp/dex/storage",
+        "//vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi",
+        "//vendor/github.com/ghodss/yaml",
+        "//vendor/gopkg.in/square/go-jose.v2:go-jose_v2",
+        "@org_golang_x_net//http2",
+    ],
+)
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/client.go b/vendor/github.com/dexidp/dex/storage/kubernetes/client.go
new file mode 100644
index 00000000..fe53fb47
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/client.go
@@ -0,0 +1,597 @@
+package kubernetes
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/fnv"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Masterminds/semver"
+	"github.com/ghodss/yaml"
+	"golang.org/x/net/http2"
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/kubernetes/k8sapi"
+)
+
+type client struct {
+	client    *http.Client
+	baseURL   string
+	namespace string
+	logger    log.Logger
+
+	// Hash function to map IDs (which could span a large range) to Kubernetes names.
+	// While this is not currently upgradable, it could be in the future.
+	//
+	// The default hash is a non-cryptographic hash, because cryptographic hashes
+	// always produce sums too long to fit into a Kubernetes name. Because of this,
+	// gets, updates, and deletes are _always_ checked for collisions.
+	hash func() hash.Hash
+
+	// API version of the oidc resources. For example "oidc.coreos.com". This is
+	// currently not configurable, but could be in the future.
+	apiVersion string
+	// API version of the custom resource definitions.
+	// Different Kubernetes versions require the CRDs to be created under a specific
+	// API version; the right one is discovered automatically when the storage is opened.
+	crdAPIVersion string
+
+	// This is called once the client's Close method is called to signal goroutines,
+	// such as the one creating third party resources, to stop.
+	cancel context.CancelFunc
+}
+
+// idToName maps an arbitrary ID, such as an email or client ID, to a Kubernetes object name.
+func (cli *client) idToName(s string) string {
+	return idToName(s, cli.hash)
+}
+
+// offlineTokenName maps two arbitrary IDs to a single Kubernetes object name.
+// This is used when more than one field is used to uniquely identify the object.
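+// Offline sessions, for example, are keyed by user ID and connector ID together.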
+func (cli *client) offlineTokenName(userID string, connID string) string {
+	return offlineTokenName(userID, connID, cli.hash)
+}
+
+// Kubernetes names must match the regexp '[a-z0-9]([-a-z0-9]*[a-z0-9])?'.
+var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567")
+
+func idToName(s string, h func() hash.Hash) string {
+	return strings.TrimRight(encoding.EncodeToString(h().Sum([]byte(s))), "=")
+}
+
+func offlineTokenName(userID string, connID string, h func() hash.Hash) string {
+	hash := h()
+	hash.Write([]byte(userID))
+	hash.Write([]byte(connID))
+	return strings.TrimRight(encoding.EncodeToString(hash.Sum(nil)), "=")
+}
+
+const kubeResourceMaxLen = 63
+
+var kubeResourceNameRegex = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
+
+func (cli *client) urlForWithParams(
+	apiVersion, namespace, resource, name string, params url.Values,
+) (string, error) {
+	basePath := "apis/"
+	if apiVersion == "v1" {
+		basePath = "api/"
+	}
+
+	if name != "" && (len(name) > kubeResourceMaxLen || !kubeResourceNameRegex.MatchString(name)) {
+		// The actual name can be found in auth request or auth code objects and equals the state value.
+		return "", fmt.Errorf(
+			"invalid kubernetes resource name: must match the pattern %s and be no longer than %d characters",
+			kubeResourceNameRegex.String(),
+			kubeResourceMaxLen)
+	}
+
+	var p string
+	if namespace != "" {
+		p = path.Join(basePath, apiVersion, "namespaces", namespace, resource, name)
+	} else {
+		p = path.Join(basePath, apiVersion, resource, name)
+	}
+
+	encodedParams := params.Encode()
+	paramsSuffix := ""
+	if len(encodedParams) > 0 {
+		paramsSuffix = "?" + encodedParams
+	}
+
+	if strings.HasSuffix(cli.baseURL, "/") {
+		return cli.baseURL + p + paramsSuffix, nil
+	}
+
+	return cli.baseURL + "/" + p + paramsSuffix, nil
+}
+
+func (cli *client) urlFor(apiVersion, namespace, resource, name string) (string, error) {
+	return cli.urlForWithParams(apiVersion, namespace, resource, name, url.Values{})
+}
+
+// Define an error interface so we can get at the underlying status code if it's
+// absolutely necessary. For instance, when we need to see if an error indicates
+// a resource already exists.
+type httpError interface {
+	StatusCode() int
+}
+
+var _ httpError = (*httpErr)(nil)
+
+type httpErr struct {
+	method string
+	url    string
+	status int
+	body   []byte
+}
+
+func (e *httpErr) StatusCode() int {
+	return e.status
+}
+
+func (e *httpErr) Error() string {
+	return fmt.Sprintf("%s %s %s: response from server \"%s\"", e.method, e.url, http.StatusText(e.status), bytes.TrimSpace(e.body))
+}
+
+func checkHTTPErr(r *http.Response, validStatusCodes ...int) error {
+	for _, status := range validStatusCodes {
+		if r.StatusCode == status {
+			return nil
+		}
+	}
+
+	body, err := io.ReadAll(io.LimitReader(r.Body, 2<<15)) // 64 KiB
+	if err != nil {
+		return fmt.Errorf("read response body: %v", err)
+	}
+
+	// Check this case after we read the body so the connection can be reused.
+	if r.StatusCode == http.StatusNotFound {
+		return storage.ErrNotFound
+	}
+	if r.Request.Method == http.MethodPost && r.StatusCode == http.StatusConflict {
+		return storage.ErrAlreadyExists
+	}
+
+	var url, method string
+	if r.Request != nil {
+		method = r.Request.Method
+		url = r.Request.URL.String()
+	}
+	return &httpErr{method, url, r.StatusCode, body}
+}
+
+// Close the response body. Any remaining body is drained first so the
+// underlying connection can be reused.
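+// Callers should defer it right after a successful request, e.g.
+//
+//	resp, err := cli.client.Get(url)
+//	if err != nil {
+//		return err
+//	}
+//	defer closeResp(resp)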
+func closeResp(r *http.Response) {
+	io.Copy(io.Discard, r.Body)
+	r.Body.Close()
+}
+
+func (cli *client) get(resource, name string, v interface{}) error {
+	return cli.getResource(cli.apiVersion, cli.namespace, resource, name, v)
+}
+
+func (cli *client) getURL(url string, v interface{}) error {
+	resp, err := cli.client.Get(url)
+	if err != nil {
+		return err
+	}
+	defer closeResp(resp)
+	if err := checkHTTPErr(resp, http.StatusOK); err != nil {
+		return err
+	}
+	return json.NewDecoder(resp.Body).Decode(v)
+}
+
+func (cli *client) getResource(apiVersion, namespace, resource, name string, v interface{}) error {
+	u, err := cli.urlFor(apiVersion, namespace, resource, name)
+	if err != nil {
+		return err
+	}
+	return cli.getURL(u, v)
+}
+
+func (cli *client) listN(resource string, v interface{}, n int) error { //nolint:unparam // In practice, n is the gcResultLimit constant.
+	params := url.Values{}
+	params.Add("limit", fmt.Sprintf("%d", n))
+	u, err := cli.urlForWithParams(cli.apiVersion, cli.namespace, resource, "", params)
+	if err != nil {
+		return err
+	}
+	return cli.getURL(u, v)
+}
+
+func (cli *client) list(resource string, v interface{}) error {
+	return cli.get(resource, "", v)
+}
+
+func (cli *client) post(resource string, v interface{}) error {
+	return cli.postResource(cli.apiVersion, cli.namespace, resource, v)
+}
+
+func (cli *client) postResource(apiVersion, namespace, resource string, v interface{}) error {
+	body, err := json.Marshal(v)
+	if err != nil {
+		return fmt.Errorf("marshal object: %v", err)
+	}
+
+	url, err := cli.urlFor(apiVersion, namespace, resource, "")
+	if err != nil {
+		return err
+	}
+	resp, err := cli.client.Post(url, "application/json", bytes.NewReader(body))
+	if err != nil {
+		return err
+	}
+	defer closeResp(resp)
+	return checkHTTPErr(resp, http.StatusCreated)
+}
+
+func (cli *client) detectKubernetesVersion() error {
+	var version struct{ GitVersion string }
+
+	url := cli.baseURL + "/version"
+	resp, err := cli.client.Get(url)
+	if err != nil {
+		return err
+	}
+
+	defer closeResp(resp)
+	if err := checkHTTPErr(resp, http.StatusOK); err != nil {
+		return err
+	}
+
+	if err := json.NewDecoder(resp.Body).Decode(&version); err != nil {
+		return err
+	}
+
+	clusterVersion, err := semver.NewVersion(version.GitVersion)
+	if err != nil {
+		cli.logger.Warnf("cannot detect Kubernetes version (%s): %v", version.GitVersion, err)
+		return nil
+	}
+
+	if clusterVersion.LessThan(semver.MustParse("v1.16.0")) {
+		cli.crdAPIVersion = legacyCRDAPIVersion
+	}
+
+	return nil
+}
+
+func (cli *client) delete(resource, name string) error {
+	url, err := cli.urlFor(cli.apiVersion, cli.namespace, resource, name)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return fmt.Errorf("create delete request: %v", err)
+	}
+	resp, err := cli.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("delete request: %v", err)
+	}
+	defer closeResp(resp)
+	return checkHTTPErr(resp, http.StatusOK)
+}
+
+func (cli *client) deleteAll(resource string) error {
+	var list struct {
+		k8sapi.TypeMeta   `json:",inline"`
+		k8sapi.ListMeta   `json:"metadata,omitempty"`
+		Items []struct {
+			k8sapi.TypeMeta   `json:",inline"`
+			k8sapi.ObjectMeta `json:"metadata,omitempty"`
+		} `json:"items"`
+	}
+	if err := cli.list(resource, &list); err != nil {
+		return err
+	}
+	for _, item := range list.Items {
+		if err := cli.delete(resource, item.Name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cli *client) put(resource, name string, v interface{}) error {
+	body, err := json.Marshal(v)
+	if err != nil {
+		return fmt.Errorf("marshal object: %v", err)
+	}
+
+	url, err := cli.urlFor(cli.apiVersion, cli.namespace, resource, name)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
+	if err != nil {
+		return fmt.Errorf("create put request: %v", err)
+	}
+
+	req.Header.Set("Content-Length", strconv.Itoa(len(body)))
+
+	resp, err := cli.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("put request: %v", err)
+	}
+	defer closeResp(resp)
+
+	return checkHTTPErr(resp, http.StatusOK)
+}
+
+// Copied from https://github.com/gtank/cryptopasta
+func defaultTLSConfig() *tls.Config {
+	return &tls.Config{
+		// Avoids most of the memorably-named TLS attacks
+		MinVersion: tls.VersionTLS12,
+		// Causes servers to use Go's default ciphersuite preferences,
+		// which are tuned to avoid attacks. Does nothing on clients.
+		PreferServerCipherSuites: true,
+		// Only use curves which have constant-time implementations
+		CurvePreferences: []tls.CurveID{
+			tls.CurveP256,
+		},
+	}
+}
+
+func newClient(cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, logger log.Logger, inCluster bool) (*client, error) {
+	tlsConfig := defaultTLSConfig()
+	data := func(b string, file string) ([]byte, error) {
+		if b != "" {
+			return base64.StdEncoding.DecodeString(b)
+		}
+		if file == "" {
+			return nil, nil
+		}
+		return os.ReadFile(file)
+	}
+
+	if caData, err := data(cluster.CertificateAuthorityData, cluster.CertificateAuthority); err != nil {
+		return nil, err
+	} else if caData != nil {
+		tlsConfig.RootCAs = x509.NewCertPool()
+		if !tlsConfig.RootCAs.AppendCertsFromPEM(caData) {
+			return nil, errors.New("no certificate data found")
+		}
+	}
+
+	clientCert, err := data(user.ClientCertificateData, user.ClientCertificate)
+	if err != nil {
+		return nil, err
+	}
+	clientKey, err := data(user.ClientKeyData, user.ClientKey)
+	if err != nil {
+		return nil, err
+	}
+	if clientCert != nil && clientKey != nil {
+		cert, err := tls.X509KeyPair(clientCert, clientKey)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load client cert: %v", err)
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+
+	var t http.RoundTripper
+	httpTransport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		TLSClientConfig:       tlsConfig,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+
+	// Since we set a custom TLS client config we have to explicitly
+	// enable HTTP/2.
+ // + // https://github.com/golang/go/blob/go1.7.4/src/net/http/transport.go#L200-L206 + if err := http2.ConfigureTransport(httpTransport); err != nil { + return nil, err + } + t = wrapRoundTripper(httpTransport, user, inCluster) + + apiVersion := "dex.coreos.com/v1" + + logger.Infof("kubernetes client apiVersion = %s", apiVersion) + return &client{ + client: &http.Client{ + Transport: t, + Timeout: 15 * time.Second, + }, + baseURL: cluster.Server, + hash: func() hash.Hash { return fnv.New64() }, + namespace: namespace, + apiVersion: apiVersion, + crdAPIVersion: crdAPIVersion, + logger: logger, + }, nil +} + +func loadKubeConfig(kubeConfigPath string) (cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, err error) { + data, err := os.ReadFile(kubeConfigPath) + if err != nil { + err = fmt.Errorf("read %s: %v", kubeConfigPath, err) + return + } + + var c k8sapi.Config + if err = yaml.Unmarshal(data, &c); err != nil { + err = fmt.Errorf("unmarshal %s: %v", kubeConfigPath, err) + return + } + + cluster, user, namespace, err = currentContext(&c) + if namespace == "" { + namespace = "default" + } + return +} + +func namespaceFromServiceAccountJWT(s string) (string, error) { + // The service account token is just a JWT. Parse it as such. + parts := strings.Split(s, ".") + if len(parts) < 2 { + // It's extremely important we don't log the actual service account token. + return "", fmt.Errorf("malformed service account token: expected 3 parts got %d", len(parts)) + } + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return "", fmt.Errorf("malformed service account token: %v", err) + } + var data struct { + // The claim Kubernetes uses to identify which namespace a service account belongs to. + // + // See: https://github.com/kubernetes/kubernetes/blob/v1.4.3/pkg/serviceaccount/jwt.go#L42 + Namespace string `json:"kubernetes.io/serviceaccount/namespace"` + } + if err := json.Unmarshal(payload, &data); err != nil { + return "", fmt.Errorf("malformed service account token: %v", err) + } + if data.Namespace == "" { + return "", errors.New(`jwt claim "kubernetes.io/serviceaccount/namespace" not found`) + } + return data.Namespace, nil +} + +func namespaceFromFile(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + + return string(data), nil +} + +func getInClusterConfigNamespace(token, namespaceENV, namespacePath string) (string, error) { + namespace := os.Getenv(namespaceENV) + if namespace != "" { + return namespace, nil + } + + namespace, err := namespaceFromServiceAccountJWT(token) + if err == nil { + return namespace, nil + } + + err = fmt.Errorf("inspect service account token: %v", err) + namespace, fileErr := namespaceFromFile(namespacePath) + if fileErr == nil { + return namespace, nil + } + + return "", fmt.Errorf("%v: trying to get namespace from file: %v", err, fileErr) +} + +func inClusterConfig() (k8sapi.Cluster, k8sapi.AuthInfo, string, error) { + const ( + serviceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount/" + serviceAccountTokenPath = serviceAccountPath + "token" + serviceAccountCAPath = serviceAccountPath + "ca.crt" + serviceAccountNamespacePath = serviceAccountPath + "namespace" + + kubernetesServiceHostENV = "KUBERNETES_SERVICE_HOST" + kubernetesServicePortENV = "KUBERNETES_SERVICE_PORT" + kubernetesPodNamespaceENV = "KUBERNETES_POD_NAMESPACE" + ) + + host, port := os.Getenv(kubernetesServiceHostENV), os.Getenv(kubernetesServicePortENV) + if len(host) == 0 || len(port) 
== 0 { + return k8sapi.Cluster{}, k8sapi.AuthInfo{}, "", fmt.Errorf( + "unable to load in-cluster configuration, %s and %s must be defined", + kubernetesServiceHostENV, + kubernetesServicePortENV, + ) + } + // we need to wrap IPv6 addresses in square brackets + // IPv4 also works with square brackets + host = "[" + host + "]" + cluster := k8sapi.Cluster{ + Server: "https://" + host + ":" + port, + CertificateAuthority: serviceAccountCAPath, + } + + token, err := os.ReadFile(serviceAccountTokenPath) + if err != nil { + return cluster, k8sapi.AuthInfo{}, "", err + } + + user := k8sapi.AuthInfo{Token: string(token)} + + namespace, err := getInClusterConfigNamespace(user.Token, kubernetesPodNamespaceENV, serviceAccountNamespacePath) + if err != nil { + return cluster, user, "", err + } + + return cluster, user, namespace, nil +} + +func currentContext(config *k8sapi.Config) (cluster k8sapi.Cluster, user k8sapi.AuthInfo, ns string, err error) { + if config.CurrentContext == "" { + if len(config.Contexts) == 1 { + config.CurrentContext = config.Contexts[0].Name + } else { + return cluster, user, "", errors.New("kubeconfig has no current context") + } + } + k8sContext, ok := func() (k8sapi.Context, bool) { + for _, namedContext := range config.Contexts { + if namedContext.Name == config.CurrentContext { + return namedContext.Context, true + } + } + return k8sapi.Context{}, false + }() + if !ok { + return cluster, user, "", fmt.Errorf("no context named %q found", config.CurrentContext) + } + + cluster, ok = func() (k8sapi.Cluster, bool) { + for _, namedCluster := range config.Clusters { + if namedCluster.Name == k8sContext.Cluster { + return namedCluster.Cluster, true + } + } + return k8sapi.Cluster{}, false + }() + if !ok { + return cluster, user, "", fmt.Errorf("no cluster named %q found", k8sContext.Cluster) + } + + user, ok = func() (k8sapi.AuthInfo, bool) { + for _, namedAuthInfo := range config.AuthInfos { + if namedAuthInfo.Name == k8sContext.AuthInfo { + return namedAuthInfo.AuthInfo, true + } + } + return k8sapi.AuthInfo{}, false + }() + if !ok { + return cluster, user, "", fmt.Errorf("no user named %q found", k8sContext.AuthInfo) + } + return cluster, user, k8sContext.Namespace, nil +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go b/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go new file mode 100644 index 00000000..1112e397 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go @@ -0,0 +1,2 @@ +// Package kubernetes provides a storage implementation using Kubernetes third party APIs. 
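+//
+// A minimal sketch of opening the storage (error handling elided; Config and
+// its fields are defined in storage.go, and logger is a dex log.Logger):
+//
+//	cfg := kubernetes.Config{KubeConfigFile: "/path/to/kubeconfig"}
+//	s, err := cfg.Open(logger)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer s.Close()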
+package kubernetes
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/BUILD b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/BUILD
new file mode 100644
index 00000000..799d1097
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/BUILD
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "k8sapi",
+    srcs = [
+        "client.go",
+        "crd_extensions.go",
+        "doc.go",
+        "extensions.go",
+        "time.go",
+        "unversioned.go",
+        "v1.go",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi",
+    importpath = "github.com/dexidp/dex/storage/kubernetes/k8sapi",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go
new file mode 100644
index 00000000..9320a297
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package k8sapi
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user.
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	Kind string `json:"kind,omitempty"`
+	// Deprecated: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+	// Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify
+	// a single value for the cluster version.
+	// This field isn't really needed anyway, so we are deprecating it without replacement.
+	// It will be ignored if it is present.
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters []NamedCluster `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos []NamedAuthInfo `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts []NamedContext `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Preferences contains information about the user's command line experience preferences.
+type Preferences struct {
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster.
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc).
+	APIVersion string `json:"api-version,omitempty"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	//
+	// NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string.
+	CertificateAuthorityData string `json:"certificate-authority-data,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	//
+	// NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string.
+	ClientCertificateData string `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	//
+	// NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string.
+	ClientKeyData string `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `json:"token,omitempty"`
+	// Impersonate is the username to impersonate. The name matches the flag.
+	Impersonate string `json:"as,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `json:"password,omitempty"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster),
+// a user (how do I identify myself), and a namespace (what subset of resources do I want to work with).
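+//
+// In a kubeconfig file this corresponds to an entry like (sketch; the names
+// are illustrative):
+//
+//	contexts:
+//	- name: dev
+//	  context:
+//	    cluster: dev-cluster
+//	    user: dev-user
+//	    namespace: dex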
+type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster Cluster `json:"cluster"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context Context `json:"context"` +} + +// NamedAuthInfo relates nicknames to auth information +type NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo AuthInfo `json:"user"` +} + +// NamedExtension relates nicknames to extension information +type NamedExtension struct { + // Name is the nickname for this Extension + Name string `json:"name"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go new file mode 100644 index 00000000..d108865a --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go @@ -0,0 +1,176 @@ +/* +Copyright 2017 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k8sapi + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // Group is the group this resource belongs in + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + // Version is the version this resource belongs in + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + // Names are the names used to describe this custom resource + Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` + + // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. 
"Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"` +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration + // too: plural.group and it must be all lowercase. + Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` + // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` + // ShortNames are short names for the resource. It must be all lowercase. + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` + // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` + // ListKind is the serialized kind of the list for this resource. Defaults to List. + ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` +} + +// ResourceScope is an enum defining the different scopes available to a custom resource +type ResourceScope string + +const ( + // ClusterScoped is the `cluster` scope for a custom resource. + ClusterScoped ResourceScope = "Cluster" + // NamespaceScoped is the `namespaced` scope for a custom resource. + NamespaceScoped ResourceScope = "Namespaced" +) + +// ConditionStatus reflects if a resource +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type +type CustomResourceDefinitionConditionType string + +const ( + // Established means that the resource has become active. A resource is established when all names are + // accepted without a conflict for the first time. A resource stays established until deleted, even during + // a later NamesAccepted due to changed names. Note that not all names can be changed. + Established CustomResourceDefinitionConditionType = "Established" + // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in + // the group and are therefore accepted. + NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. 
+	Terminating CustomResourceDefinitionConditionType = "Terminating"
+)
+
+// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition.
+type CustomResourceDefinitionCondition struct {
+	// Type is the type of the condition.
+	Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition
+type CustomResourceDefinitionStatus struct {
+	// Conditions indicate state for particular aspects of a CustomResourceDefinition
+	Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"`
+
+	// AcceptedNames are the names that are actually being used to serve discovery.
+	// They may be different than the names in spec.
+	AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"`
+}
+
+// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of
+// a CustomResourceDefinition
+const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
+// <.spec.name>.<.spec.group>.
+type CustomResourceDefinition struct {
+	TypeMeta   `json:",inline"`
+	ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec describes how the user wants the resources to appear
+	Spec CustomResourceDefinitionSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+	// Status indicates the actual state of the CustomResourceDefinition
+	Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
+type CustomResourceDefinitionList struct {
+	TypeMeta `json:",inline"`
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items individual CustomResourceDefinitions
+	Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+type CustomResourceDefinitionVersion struct {
+	// name is the version name, e.g. “v1”, “v2beta1”, etc.
+	// The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // served is a flag enabling/disabling this version from being served via REST APIs + Served bool `json:"served" protobuf:"varint,2,opt,name=served"` + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +type CustomResourceValidation struct { + // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go new file mode 100644 index 00000000..cdefdbdb --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go @@ -0,0 +1,2 @@ +// Package k8sapi holds vendored Kubernetes types. +package k8sapi diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go new file mode 100644 index 00000000..8a7dbfd6 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go @@ -0,0 +1,23 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k8sapi + +// An APIVersion represents a single concrete version of an object model. +type APIVersion struct { + // Name of this version (e.g. 'v1'). + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go new file mode 100644 index 00000000..5b60bfdd --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go @@ -0,0 +1,138 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package k8sapi
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+type Time struct {
+	time.Time `protobuf:"-"`
+}
+
+// NewTime returns a wrapped instance of the provided time
+func NewTime(time time.Time) Time {
+	return Time{time}
+}
+
+// Date returns the Time corresponding to the supplied parameters
+// by wrapping time.Date.
+func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
+	return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// Now returns the current local time.
+func Now() Time {
+	return Time{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *Time) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t Time) Before(u Time) bool {
+	return t.Time.Before(u.Time)
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t Time) Equal(u Time) bool {
+	return t.Time.Equal(u.Time)
+}
+
+// Unix returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func Unix(sec int64, nsec int64) Time {
+	return Time{time.Unix(sec, nsec)}
+}
+
+// Rfc3339Copy returns a copy of the Time at second-level precision.
+func (t Time) Rfc3339Copy() Time {
+	copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339))
+	return Time{copied}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	if err := json.Unmarshal(b, &str); err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *Time) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+ return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(time.RFC3339)) +} + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go new file mode 100644 index 00000000..f123f529 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k8sapi + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#resources + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go new file mode 100644 index 00000000..55825e65 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go @@ -0,0 +1,162 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package k8sapi
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+	// Name must be unique within a namespace. Is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// Cannot be updated.
+	// More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// GenerateName is an optional prefix, used by the server, to generate a unique
+	// name ONLY IF the Name field has not been provided.
+	// If this field is used, the name returned to the client will be different
+	// than the name passed. This value will also be combined with a unique suffix.
+	// The provided value has the same validation rules as the Name field,
+	// and may be truncated by the length of the suffix required to make the value
+	// unique on the server.
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#idempotency
+	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+
+	// SelfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
+
+	// UID is the unique in time and space value for this object. It is typically generated by
+	// the server on successful creation of a resource and is not allowed to change on PUT
+	// operations.
+	//
+	// Populated by the system.
+	// Read-only.
+	// More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids
+	UID string `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+
+	// An opaque value that represents the internal version of this object that can
+	// be used by clients to determine when objects have changed. May be used for optimistic
+	// concurrency, change detection, and the watch operation on a resource or set of resources.
+	// Clients must treat these values as opaque and pass them unmodified back to the server.
+	// They may only be valid for a particular resource or set of resources.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Value must be treated as opaque by clients and passed unmodified back to the server.
+	// More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// A sequence number representing a specific generation of the desired state.
+	// Populated by the system. Read-only.
+	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
+
+	// CreationTimestamp is a timestamp representing the server time when this object was
+	// created. It is not guaranteed to be set in happens-before order across separate operations.
+	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Null for lists.
+	// More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata
+	CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+
+	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+	// field is set by the server when a graceful deletion is requested by the user, and is not
+	// directly settable by a client. The resource will be deleted (no longer visible from
+	// resource lists, and not reachable by name) after the time in this field. Once set, this
+	// value may not be unset or be set further into the future, although it may be shortened
+	// or the resource may be deleted prior to this time. For example, a user may request that
+	// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
+	// signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet
+	// will send a hard termination signal to the container.
+	// If not set, graceful deletion of the object has not been requested.
+	//
+	// Populated by the system when a graceful deletion is requested.
+	// Read-only.
+	// More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata
+	DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
+
+	// Number of seconds allowed for this object to gracefully terminate before
+	// it will be removed from the system. Only set when deletionTimestamp is also set.
+	// May only be shortened.
+	// Read-only.
+	DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
+
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects. May match selectors of replication controllers
+	// and services.
+	// More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md
+	// TODO: replace map[string]string with labels.LabelSet type
+	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: http://releases.k8s.io/release-1.3/docs/user-guide/annotations.md
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+
+	// List of objects depended on by this object. If ALL objects in the list have
+	// been deleted, this object will be garbage collected. If this object is managed by a controller,
+	// then an entry in this list will point to this controller, with the controller field set to true.
+	// There cannot be more than one managing controller.
+	OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
+
+	// Must be empty before the object is deleted from the registry. Each entry
+	// is an identifier for the responsible component that will remove the entry
+	// from the list. If the deletionTimestamp of the object is non-nil, entries
+	// in this list can only be removed.
+	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
+type OwnerReference struct {
+	// API version of the referent.
+	APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
+	// Kind of the referent.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+	UID string `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+	// If true, this reference points to the managing controller.
+	Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"`
+}
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/lock.go b/vendor/github.com/dexidp/dex/storage/kubernetes/lock.go
new file mode 100644
index 00000000..12075e81
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/lock.go
@@ -0,0 +1,124 @@
+package kubernetes
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	lockAnnotation = "dexidp.com/resource-lock"
+	lockTimeFormat = time.RFC3339
+)
+
+var (
+	lockTimeout     = 10 * time.Second
+	lockCheckPeriod = 100 * time.Millisecond
+)
+
+// refreshTokenLock is an implementation of annotation-based optimistic locking.
+//
+// A refresh token contains the data needed to refresh an identity against an external
+// authentication system. The refresh must be performed exactly once, for several reasons:
+// - Some OIDC providers use refresh token rotation, which allows a refresh token to be
+//   redeemed only once.
+// - Providers can rate limit requests to the token endpoint, so many concurrent requests
+//   may lead to errors.
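+//
+// The intended call pattern looks roughly like this (sketch; the refresh
+// handler itself lives elsewhere in dex):
+//
+//	lock := newRefreshTokenLock(cli)
+//	if err := lock.Lock(id); err != nil {
+//		return err
+//	}
+//	defer lock.Unlock(id)
+//	// ...perform the single refresh against the upstream provider...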
+type refreshTokenLock struct {
+	cli          *client
+	waitingState bool
+}
+
+func newRefreshTokenLock(cli *client) *refreshTokenLock {
+	return &refreshTokenLock{cli: cli}
+}
+
+func (l *refreshTokenLock) Lock(id string) error {
+	for i := 0; i <= 60; i++ {
+		ok, err := l.setLockAnnotation(id)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return nil
+		}
+		time.Sleep(lockCheckPeriod)
+	}
+	return fmt.Errorf("timeout waiting for refresh token %s lock", id)
+}
+
+func (l *refreshTokenLock) Unlock(id string) {
+	if l.waitingState {
+		// No need to unlock for waiting goroutines, because they have not set the annotation.
+		return
+	}
+
+	r, err := l.cli.getRefreshToken(id)
+	if err != nil {
+		l.cli.logger.Debugf("failed to get resource to release lock for refresh token %s: %v", id, err)
+		return
+	}
+
+	r.Annotations = nil
+	err = l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
+	if err != nil {
+		l.cli.logger.Debugf("failed to release lock for refresh token %s: %v", id, err)
+	}
+}
+
+func (l *refreshTokenLock) setLockAnnotation(id string) (bool, error) {
+	r, err := l.cli.getRefreshToken(id)
+	if err != nil {
+		return false, err
+	}
+
+	currentTime := time.Now()
+	lockData := map[string]string{
+		lockAnnotation: currentTime.Add(lockTimeout).Format(lockTimeFormat),
+	}
+
+	val, ok := r.Annotations[lockAnnotation]
+	if !ok {
+		if l.waitingState {
+			return false, nil
+		}
+
+		r.Annotations = lockData
+		err := l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
+		if err == nil {
+			return false, nil
+		}
+
+		if isKubernetesAPIConflictError(err) {
+			l.waitingState = true
+			return true, nil
+		}
+		return false, err
+	}
+
+	until, err := time.Parse(lockTimeFormat, val)
+	if err != nil {
+		return false, fmt.Errorf("lock annotation value is malformed: %v", err)
+	}
+
+	if !currentTime.After(until) {
+		// Waiting for the lock to be released.
+		l.waitingState = true
+		return true, nil
+	}
+
+	// The lock has expired; let's break it and take over.
+	r.Annotations = lockData
+
+	err = l.cli.put(resourceRefreshToken, r.ObjectMeta.Name, r)
+	if err == nil {
+		// Successfully broke the lock annotation.
+		return false, nil
+	}
+
+	l.cli.logger.Debugf("break lock annotation error: %v", err)
+	if isKubernetesAPIConflictError(err) {
+		l.waitingState = true
+		// Another goroutine broke the lock first; wait for it to be released.
+		return true, nil
+	}
+	return false, err
+}
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go b/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go
new file mode 100644
index 00000000..0979f14a
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go
@@ -0,0 +1,769 @@
+package kubernetes
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/kubernetes/k8sapi"
+)
+
+const (
+	kindAuthCode        = "AuthCode"
+	kindAuthRequest     = "AuthRequest"
+	kindClient          = "OAuth2Client"
+	kindRefreshToken    = "RefreshToken"
+	kindKeys            = "SigningKey"
+	kindPassword        = "Password"
+	kindOfflineSessions = "OfflineSessions"
+	kindConnector       = "Connector"
+	kindDeviceRequest   = "DeviceRequest"
+	kindDeviceToken     = "DeviceToken"
+)
+
+const (
+	resourceAuthCode     = "authcodes"
+	resourceAuthRequest  = "authrequests"
+	resourceClient       = "oauth2clients"
+	resourceRefreshToken = "refreshtokens"
+	resourceKeys         = "signingkeies" // Kubernetes attempts to pluralize.
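+	// NOTE: the incorrect plural above is what the resource was originally
+	// registered as; presumably it is kept as-is so existing objects stay
+	// reachable.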
+	resourcePassword        = "passwords"
+	resourceOfflineSessions = "offlinesessionses" // Again attempts to pluralize.
+	resourceConnector       = "connectors"
+	resourceDeviceRequest   = "devicerequests"
+	resourceDeviceToken     = "devicetokens"
+)
+
+const (
+	gcResultLimit = 500
+)
+
+// Config values for the Kubernetes storage type.
+type Config struct {
+	InCluster      bool   `json:"inCluster"`
+	KubeConfigFile string `json:"kubeConfigFile"`
+}
+
+// Open returns a storage using Kubernetes third party resource.
+func (c *Config) Open(logger log.Logger) (storage.Storage, error) {
+	cli, err := c.open(logger, false)
+	if err != nil {
+		return nil, err
+	}
+	return cli, nil
+}
+
+// open returns a kubernetes client, initializing the third party resources used
+// by dex.
+//
+// waitForResources controls if errors creating the resources cause this method to return
+// immediately (used during testing), or if the client will asynchronously retry.
+func (c *Config) open(logger log.Logger, waitForResources bool) (*client, error) {
+	if c.InCluster && (c.KubeConfigFile != "") {
+		return nil, errors.New("cannot specify both 'inCluster' and 'kubeConfigFile'")
+	}
+	if !c.InCluster && (c.KubeConfigFile == "") {
+		return nil, errors.New("must specify either 'inCluster' or 'kubeConfigFile'")
+	}
+
+	var (
+		cluster   k8sapi.Cluster
+		user      k8sapi.AuthInfo
+		namespace string
+		err       error
+	)
+	if c.InCluster {
+		cluster, user, namespace, err = inClusterConfig()
+	} else {
+		cluster, user, namespace, err = loadKubeConfig(c.KubeConfigFile)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	cli, err := newClient(cluster, user, namespace, logger, c.InCluster)
+	if err != nil {
+		return nil, fmt.Errorf("create client: %v", err)
+	}
+
+	if err = cli.detectKubernetesVersion(); err != nil {
+		return nil, fmt.Errorf("cannot get kubernetes version: %v", err)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	logger.Info("creating custom Kubernetes resources")
+	if !cli.registerCustomResources() {
+		if waitForResources {
+			cancel()
+			return nil, fmt.Errorf("failed creating custom resources")
+		}
+
+		// The synchronous attempt above failed. Retry in the background until
+		// the resources are created or the client is closed; even then, success
+		// doesn't mean the resources are immediately available.
+		go func() {
+			for {
+				if cli.registerCustomResources() {
+					return
+				}
+
+				select {
+				case <-ctx.Done():
+					return
+				case <-time.After(30 * time.Second):
+				}
+			}
+		}()
+	}
+
+	if waitForResources {
+		if err := cli.waitForCRDs(ctx); err != nil {
+			cancel()
+			return nil, err
+		}
+	}
+
+	// If the client is closed, stop trying to create resources.
+	cli.cancel = cancel
+	return cli, nil
+}
+
+// registerCustomResources attempts to create the custom resource definitions (CRDs)
+// dex requires, or detects that they already exist.
+// It logs all errors, returning true if the resources were created successfully.
+//
+// Creating a custom resource does not mean it will be immediately available.
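+//
+// A caller that must block until the CRDs are actually usable can pair this
+// with waitForCRDs, as open does above for tests (sketch):
+//
+//	if !cli.registerCustomResources() {
+//		return fmt.Errorf("failed creating custom resources")
+//	}
+//	if err := cli.waitForCRDs(ctx); err != nil {
+//		return err
+//	}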
+func (cli *client) registerCustomResources() (ok bool) {
+	ok = true
+
+	definitions := customResourceDefinitions(cli.crdAPIVersion)
+	length := len(definitions)
+
+	for i := 0; i < length; i++ {
+		var err error
+		var resourceName string
+
+		r := definitions[i]
+		var res interface{}
+		cli.logger.Infof("checking if custom resource %s has already been created...", r.ObjectMeta.Name)
+		if err := cli.list(r.Spec.Names.Plural, &res); err == nil {
+			cli.logger.Infof("custom resource %s is already available, skipping creation", r.ObjectMeta.Name)
+			continue
+		} else {
+			cli.logger.Infof("failed to list custom resource %s, attempting to create: %v", r.ObjectMeta.Name, err)
+		}
+
+		err = cli.postResource(cli.crdAPIVersion, "", "customresourcedefinitions", r)
+		resourceName = r.ObjectMeta.Name
+
+		if err != nil {
+			switch err {
+			case storage.ErrAlreadyExists:
+				cli.logger.Infof("custom resource already created %s", resourceName)
+			case storage.ErrNotFound:
+				cli.logger.Errorf("custom resources not found, please enable the respective API group")
+				ok = false
+			default:
+				cli.logger.Errorf("creating custom resource %s: %v", resourceName, err)
+				ok = false
+			}
+			continue
+		}
+		cli.logger.Infof("created custom resource %s", resourceName)
+	}
+	return ok
+}
+
+// waitForCRDs waits for all CRDs to be in a ready state, and is used
+// by the tests to synchronize before running conformance.
+func (cli *client) waitForCRDs(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+	defer cancel()
+
+	for _, crd := range customResourceDefinitions(cli.crdAPIVersion) {
+		for {
+			err := cli.isCRDReady(crd.Name)
+			if err == nil {
+				break
+			}
+
+			cli.logger.Errorf("checking CRD: %v", err)
+
+			select {
+			case <-ctx.Done():
+				return errors.New("timed out waiting for CRDs to be available")
+			case <-time.After(time.Millisecond * 100):
+			}
+		}
+	}
+	return nil
+}
+
+// isCRDReady determines if a CRD is ready by inspecting its conditions.
+func (cli *client) isCRDReady(name string) error {
+	var r k8sapi.CustomResourceDefinition
+	err := cli.getResource(cli.crdAPIVersion, "", "customresourcedefinitions", name, &r)
+	if err != nil {
+		return fmt.Errorf("get crd %s: %v", name, err)
+	}
+
+	conds := make(map[string]string) // For debugging, keep the conditions around.
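+	// A CRD counts as ready once the API server reports the Established
+	// condition with status True; any other condition values are collected
+	// into conds for the error message below.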
+ for _, c := range r.Status.Conditions { + if c.Type == k8sapi.Established && c.Status == k8sapi.ConditionTrue { + return nil + } + conds[string(c.Type)] = string(c.Status) + } + return fmt.Errorf("crd %s not ready %#v", name, conds) +} + +func (cli *client) Close() error { + if cli.cancel != nil { + cli.cancel() + } + return nil +} + +func (cli *client) CreateAuthRequest(a storage.AuthRequest) error { + return cli.post(resourceAuthRequest, cli.fromStorageAuthRequest(a)) +} + +func (cli *client) CreateClient(c storage.Client) error { + return cli.post(resourceClient, cli.fromStorageClient(c)) +} + +func (cli *client) CreateAuthCode(c storage.AuthCode) error { + return cli.post(resourceAuthCode, cli.fromStorageAuthCode(c)) +} + +func (cli *client) CreatePassword(p storage.Password) error { + return cli.post(resourcePassword, cli.fromStoragePassword(p)) +} + +func (cli *client) CreateRefresh(r storage.RefreshToken) error { + return cli.post(resourceRefreshToken, cli.fromStorageRefreshToken(r)) +} + +func (cli *client) CreateOfflineSessions(o storage.OfflineSessions) error { + return cli.post(resourceOfflineSessions, cli.fromStorageOfflineSessions(o)) +} + +func (cli *client) CreateConnector(c storage.Connector) error { + return cli.post(resourceConnector, cli.fromStorageConnector(c)) +} + +func (cli *client) GetAuthRequest(id string) (storage.AuthRequest, error) { + var req AuthRequest + if err := cli.get(resourceAuthRequest, id, &req); err != nil { + return storage.AuthRequest{}, err + } + return toStorageAuthRequest(req), nil +} + +func (cli *client) GetAuthCode(id string) (storage.AuthCode, error) { + var code AuthCode + if err := cli.get(resourceAuthCode, id, &code); err != nil { + return storage.AuthCode{}, err + } + return toStorageAuthCode(code), nil +} + +func (cli *client) GetClient(id string) (storage.Client, error) { + c, err := cli.getClient(id) + if err != nil { + return storage.Client{}, err + } + return toStorageClient(c), nil +} + +func (cli *client) getClient(id string) (Client, error) { + var c Client + name := cli.idToName(id) + if err := cli.get(resourceClient, name, &c); err != nil { + return Client{}, err + } + if c.ID != id { + return Client{}, fmt.Errorf("get client: ID %q mapped to client with ID %q", id, c.ID) + } + return c, nil +} + +func (cli *client) GetPassword(email string) (storage.Password, error) { + p, err := cli.getPassword(email) + if err != nil { + return storage.Password{}, err + } + return toStoragePassword(p), nil +} + +func (cli *client) getPassword(email string) (Password, error) { + // TODO(ericchiang): Figure out whose job it is to lowercase emails. 
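+	// For now the storage lowercases emails on both write (fromStoragePassword)
+	// and read, so lookups are case-insensitive.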
+ email = strings.ToLower(email) + var p Password + name := cli.idToName(email) + if err := cli.get(resourcePassword, name, &p); err != nil { + return Password{}, err + } + if email != p.Email { + return Password{}, fmt.Errorf("get email: email %q mapped to password with email %q", email, p.Email) + } + return p, nil +} + +func (cli *client) GetKeys() (storage.Keys, error) { + var keys Keys + if err := cli.get(resourceKeys, keysName, &keys); err != nil { + return storage.Keys{}, err + } + return toStorageKeys(keys), nil +} + +func (cli *client) GetRefresh(id string) (storage.RefreshToken, error) { + r, err := cli.getRefreshToken(id) + if err != nil { + return storage.RefreshToken{}, err + } + return toStorageRefreshToken(r), nil +} + +func (cli *client) getRefreshToken(id string) (r RefreshToken, err error) { + err = cli.get(resourceRefreshToken, id, &r) + return +} + +func (cli *client) GetOfflineSessions(userID string, connID string) (storage.OfflineSessions, error) { + o, err := cli.getOfflineSessions(userID, connID) + if err != nil { + return storage.OfflineSessions{}, err + } + return toStorageOfflineSessions(o), nil +} + +func (cli *client) getOfflineSessions(userID string, connID string) (o OfflineSessions, err error) { + name := cli.offlineTokenName(userID, connID) + if err = cli.get(resourceOfflineSessions, name, &o); err != nil { + return OfflineSessions{}, err + } + if userID != o.UserID || connID != o.ConnID { + return OfflineSessions{}, fmt.Errorf("get offline session: wrong session retrieved") + } + return o, nil +} + +func (cli *client) GetConnector(id string) (storage.Connector, error) { + var c Connector + if err := cli.get(resourceConnector, id, &c); err != nil { + return storage.Connector{}, err + } + return toStorageConnector(c), nil +} + +func (cli *client) ListClients() ([]storage.Client, error) { + return nil, errors.New("not implemented") +} + +func (cli *client) ListRefreshTokens() ([]storage.RefreshToken, error) { + return nil, errors.New("not implemented") +} + +func (cli *client) ListPasswords() (passwords []storage.Password, err error) { + var passwordList PasswordList + if err = cli.list(resourcePassword, &passwordList); err != nil { + return passwords, fmt.Errorf("failed to list passwords: %v", err) + } + + for _, password := range passwordList.Passwords { + p := storage.Password{ + Email: password.Email, + Hash: password.Hash, + Username: password.Username, + UserID: password.UserID, + } + passwords = append(passwords, p) + } + + return +} + +func (cli *client) ListConnectors() (connectors []storage.Connector, err error) { + var connectorList ConnectorList + if err = cli.list(resourceConnector, &connectorList); err != nil { + return connectors, fmt.Errorf("failed to list connectors: %v", err) + } + + connectors = make([]storage.Connector, len(connectorList.Connectors)) + for i, connector := range connectorList.Connectors { + connectors[i] = toStorageConnector(connector) + } + + return +} + +func (cli *client) DeleteAuthRequest(id string) error { + return cli.delete(resourceAuthRequest, id) +} + +func (cli *client) DeleteAuthCode(code string) error { + return cli.delete(resourceAuthCode, code) +} + +func (cli *client) DeleteClient(id string) error { + // Check for hash collision. 
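+	// Object names are a hash of the ID (see idToName), so fetch the client
+	// first; getClient verifies that the stored ID matches the requested one.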
+ c, err := cli.getClient(id) + if err != nil { + return err + } + return cli.delete(resourceClient, c.ObjectMeta.Name) +} + +func (cli *client) DeleteRefresh(id string) error { + return cli.delete(resourceRefreshToken, id) +} + +func (cli *client) DeletePassword(email string) error { + // Check for hash collision. + p, err := cli.getPassword(email) + if err != nil { + return err + } + return cli.delete(resourcePassword, p.ObjectMeta.Name) +} + +func (cli *client) DeleteOfflineSessions(userID string, connID string) error { + // Check for hash collision. + o, err := cli.getOfflineSessions(userID, connID) + if err != nil { + return err + } + return cli.delete(resourceOfflineSessions, o.ObjectMeta.Name) +} + +func (cli *client) DeleteConnector(id string) error { + return cli.delete(resourceConnector, id) +} + +func (cli *client) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { + lock := newRefreshTokenLock(cli) + + if err := lock.Lock(id); err != nil { + return err + } + defer lock.Unlock(id) + + return retryOnConflict(context.TODO(), func() error { + r, err := cli.getRefreshToken(id) + if err != nil { + return err + } + + updated, err := updater(toStorageRefreshToken(r)) + if err != nil { + return err + } + updated.ID = id + + newToken := cli.fromStorageRefreshToken(updated) + newToken.ObjectMeta = r.ObjectMeta + + return cli.put(resourceRefreshToken, r.ObjectMeta.Name, newToken) + }) +} + +func (cli *client) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { + c, err := cli.getClient(id) + if err != nil { + return err + } + + updated, err := updater(toStorageClient(c)) + if err != nil { + return err + } + updated.ID = c.ID + + newClient := cli.fromStorageClient(updated) + newClient.ObjectMeta = c.ObjectMeta + return cli.put(resourceClient, c.ObjectMeta.Name, newClient) +} + +func (cli *client) UpdatePassword(email string, updater func(old storage.Password) (storage.Password, error)) error { + p, err := cli.getPassword(email) + if err != nil { + return err + } + + updated, err := updater(toStoragePassword(p)) + if err != nil { + return err + } + updated.Email = p.Email + + newPassword := cli.fromStoragePassword(updated) + newPassword.ObjectMeta = p.ObjectMeta + return cli.put(resourcePassword, p.ObjectMeta.Name, newPassword) +} + +func (cli *client) UpdateOfflineSessions(userID string, connID string, updater func(old storage.OfflineSessions) (storage.OfflineSessions, error)) error { + return retryOnConflict(context.TODO(), func() error { + o, err := cli.getOfflineSessions(userID, connID) + if err != nil { + return err + } + + updated, err := updater(toStorageOfflineSessions(o)) + if err != nil { + return err + } + + newOfflineSessions := cli.fromStorageOfflineSessions(updated) + newOfflineSessions.ObjectMeta = o.ObjectMeta + return cli.put(resourceOfflineSessions, o.ObjectMeta.Name, newOfflineSessions) + }) +} + +func (cli *client) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { + firstUpdate := false + var keys Keys + if err := cli.get(resourceKeys, keysName, &keys); err != nil { + if err != storage.ErrNotFound { + return err + } + firstUpdate = true + } + + var oldKeys storage.Keys + if !firstUpdate { + oldKeys = toStorageKeys(keys) + } + + updated, err := updater(oldKeys) + if err != nil { + return err + } + + newKeys := cli.fromStorageKeys(updated) + if firstUpdate { + err = cli.post(resourceKeys, newKeys) + if err != nil && errors.Is(err, storage.ErrAlreadyExists) { 
+ // We need to tolerate conflicts here in case of HA mode. + cli.logger.Debugf("Keys creation failed: %v. It is possible that keys have already been created by another dex instance.", err) + return errors.New("keys already created by another server instance") + } + + return err + } + + newKeys.ObjectMeta = keys.ObjectMeta + + err = cli.put(resourceKeys, keysName, newKeys) + if isKubernetesAPIConflictError(err) { + // We need to tolerate conflicts here in case of HA mode. + // Dex instances run keys rotation at the same time because they use SigningKey.nextRotation CR field as a trigger. + cli.logger.Debugf("Keys rotation failed: %v. It is possible that keys have already been rotated by another dex instance.", err) + return errors.New("keys already rotated by another server instance") + } + + return err +} + +func (cli *client) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { + var req AuthRequest + err := cli.get(resourceAuthRequest, id, &req) + if err != nil { + return err + } + + updated, err := updater(toStorageAuthRequest(req)) + if err != nil { + return err + } + + newReq := cli.fromStorageAuthRequest(updated) + newReq.ObjectMeta = req.ObjectMeta + return cli.put(resourceAuthRequest, id, newReq) +} + +func (cli *client) UpdateConnector(id string, updater func(a storage.Connector) (storage.Connector, error)) error { + return retryOnConflict(context.TODO(), func() error { + var c Connector + err := cli.get(resourceConnector, id, &c) + if err != nil { + return err + } + + updated, err := updater(toStorageConnector(c)) + if err != nil { + return err + } + + newConn := cli.fromStorageConnector(updated) + newConn.ObjectMeta = c.ObjectMeta + return cli.put(resourceConnector, id, newConn) + }) +} + +func (cli *client) GarbageCollect(now time.Time) (result storage.GCResult, err error) { + var authRequests AuthRequestList + if err := cli.listN(resourceAuthRequest, &authRequests, gcResultLimit); err != nil { + return result, fmt.Errorf("failed to list auth requests: %v", err) + } + + var delErr error + for _, authRequest := range authRequests.AuthRequests { + if now.After(authRequest.Expiry) { + if err := cli.delete(resourceAuthRequest, authRequest.ObjectMeta.Name); err != nil { + cli.logger.Errorf("failed to delete auth request: %v", err) + delErr = fmt.Errorf("failed to delete auth request: %v", err) + } + result.AuthRequests++ + } + } + if delErr != nil { + return result, delErr + } + + var authCodes AuthCodeList + if err := cli.listN(resourceAuthCode, &authCodes, gcResultLimit); err != nil { + return result, fmt.Errorf("failed to list auth codes: %v", err) + } + + for _, authCode := range authCodes.AuthCodes { + if now.After(authCode.Expiry) { + if err := cli.delete(resourceAuthCode, authCode.ObjectMeta.Name); err != nil { + cli.logger.Errorf("failed to delete auth code %v", err) + delErr = fmt.Errorf("failed to delete auth code: %v", err) + } + result.AuthCodes++ + } + } + + var deviceRequests DeviceRequestList + if err := cli.listN(resourceDeviceRequest, &deviceRequests, gcResultLimit); err != nil { + return result, fmt.Errorf("failed to list device requests: %v", err) + } + + for _, deviceRequest := range deviceRequests.DeviceRequests { + if now.After(deviceRequest.Expiry) { + if err := cli.delete(resourceDeviceRequest, deviceRequest.ObjectMeta.Name); err != nil { + cli.logger.Errorf("failed to delete device request: %v", err) + delErr = fmt.Errorf("failed to delete device request: %v", err) + } + result.DeviceRequests++ + } + } + + 
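+	// Device tokens are swept last. As with the other resources, expired
+	// entries are counted even when an individual delete fails, so the totals
+	// reflect how many expired objects were found rather than deleted.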
var deviceTokens DeviceTokenList + if err := cli.listN(resourceDeviceToken, &deviceTokens, gcResultLimit); err != nil { + return result, fmt.Errorf("failed to list device tokens: %v", err) + } + + for _, deviceToken := range deviceTokens.DeviceTokens { + if now.After(deviceToken.Expiry) { + if err := cli.delete(resourceDeviceToken, deviceToken.ObjectMeta.Name); err != nil { + cli.logger.Errorf("failed to delete device token: %v", err) + delErr = fmt.Errorf("failed to delete device token: %v", err) + } + result.DeviceTokens++ + } + } + + if delErr != nil { + return result, delErr + } + return result, delErr +} + +func (cli *client) CreateDeviceRequest(d storage.DeviceRequest) error { + return cli.post(resourceDeviceRequest, cli.fromStorageDeviceRequest(d)) +} + +func (cli *client) GetDeviceRequest(userCode string) (storage.DeviceRequest, error) { + var req DeviceRequest + if err := cli.get(resourceDeviceRequest, strings.ToLower(userCode), &req); err != nil { + return storage.DeviceRequest{}, err + } + return toStorageDeviceRequest(req), nil +} + +func (cli *client) CreateDeviceToken(t storage.DeviceToken) error { + return cli.post(resourceDeviceToken, cli.fromStorageDeviceToken(t)) +} + +func (cli *client) GetDeviceToken(deviceCode string) (storage.DeviceToken, error) { + var token DeviceToken + if err := cli.get(resourceDeviceToken, deviceCode, &token); err != nil { + return storage.DeviceToken{}, err + } + return toStorageDeviceToken(token), nil +} + +func (cli *client) getDeviceToken(deviceCode string) (t DeviceToken, err error) { + err = cli.get(resourceDeviceToken, deviceCode, &t) + return +} + +func (cli *client) UpdateDeviceToken(deviceCode string, updater func(old storage.DeviceToken) (storage.DeviceToken, error)) error { + return retryOnConflict(context.TODO(), func() error { + r, err := cli.getDeviceToken(deviceCode) + if err != nil { + return err + } + updated, err := updater(toStorageDeviceToken(r)) + if err != nil { + return err + } + updated.DeviceCode = deviceCode + + newToken := cli.fromStorageDeviceToken(updated) + newToken.ObjectMeta = r.ObjectMeta + return cli.put(resourceDeviceToken, r.ObjectMeta.Name, newToken) + }) +} + +func isKubernetesAPIConflictError(err error) bool { + if httpErr, ok := err.(httpError); ok { + if httpErr.StatusCode() == http.StatusConflict { + return true + } + } + return false +} + +func retryOnConflict(ctx context.Context, action func() error) error { + policy := []int{10, 20, 100, 300, 600} + + attempts := 0 + getNextStep := func() time.Duration { + step := policy[attempts] + return time.Duration(step*5+rand.Intn(step)) * time.Microsecond + } + + if err := action(); err == nil || !isKubernetesAPIConflictError(err) { + return err + } + + for { + select { + case <-time.After(getNextStep()): + err := action() + if err == nil || !isKubernetesAPIConflictError(err) { + return err + } + + attempts++ + if attempts >= 4 { + return fmt.Errorf("maximum timeout reached while retrying a conflicted request: %w", err) + } + case <-ctx.Done(): + return errors.New("canceled") + } + } +} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/transport.go b/vendor/github.com/dexidp/dex/storage/kubernetes/transport.go new file mode 100644 index 00000000..9c3cd2ba --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/kubernetes/transport.go @@ -0,0 +1,123 @@ +package kubernetes + +import ( + "net/http" + "os" + "sync" + "time" + + "github.com/dexidp/dex/storage/kubernetes/k8sapi" +) + +// transport is a simple http.Transport wrapper +type transport struct 
{
+	updateReq func(r *http.Request)
+	base      http.RoundTripper
+}
+
+func (t transport) RoundTrip(r *http.Request) (*http.Response, error) {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+	t.updateReq(r2)
+	return t.base.RoundTrip(r2)
+}
+
+func wrapRoundTripper(base http.RoundTripper, user k8sapi.AuthInfo, inCluster bool) http.RoundTripper {
+	if inCluster {
+		inClusterTransportHelper := newInClusterTransportHelper(user)
+		return transport{
+			updateReq: func(r *http.Request) {
+				inClusterTransportHelper.UpdateToken()
+				r.Header.Set("Authorization", "Bearer "+inClusterTransportHelper.GetToken())
+			},
+			base: base,
+		}
+	}
+
+	if user.Token != "" {
+		return transport{
+			updateReq: func(r *http.Request) {
+				r.Header.Set("Authorization", "Bearer "+user.Token)
+			},
+			base: base,
+		}
+	}
+
+	if user.Username != "" && user.Password != "" {
+		return transport{
+			updateReq: func(r *http.Request) {
+				r.SetBasicAuth(user.Username, user.Password)
+			},
+			base: base,
+		}
+	}
+
+	return base
+}
+
+// renewTokenPeriod is the interval after which dex will read the token from a well-known file.
+//
+// Per the Kubernetes documentation, this interval should be at least one minute.
+// Kubernetes client-go v0.15+ uses a 10-second interval.
+// Dex uses a reasonable value between the two.
+const renewTokenPeriod = 30 * time.Second
+
+// inClusterTransportHelper is capable of safely updating the user token.
+//
+// The BoundServiceAccountTokenVolume feature is enabled by default in Kubernetes >=1.21.
+// With this feature, the service account token in the pod is rotated periodically,
+// so dex needs to re-read the token from disk from time to time to be sure it is using a valid one.
+type inClusterTransportHelper struct {
+	mu   sync.RWMutex
+	info k8sapi.AuthInfo
+
+	expiry time.Time
+	now    func() time.Time
+
+	tokenLocation string
+}
+
+func newInClusterTransportHelper(info k8sapi.AuthInfo) *inClusterTransportHelper {
+	user := &inClusterTransportHelper{
+		info:          info,
+		now:           time.Now,
+		tokenLocation: "/var/run/secrets/kubernetes.io/serviceaccount/token",
+	}
+
+	user.UpdateToken()
+
+	return user
+}
+
+func (c *inClusterTransportHelper) UpdateToken() {
+	c.mu.RLock()
+	exp := c.expiry
+	c.mu.RUnlock()
+
+	if !c.now().After(exp) {
+		// No need to update the token yet.
+		return
+	}
+
+	token, err := os.ReadFile(c.tokenLocation)
+	if err != nil {
+		return
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.info.Token = string(token)
+	c.expiry = c.now().Add(renewTokenPeriod)
+}
+
+func (c *inClusterTransportHelper) GetToken() string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.info.Token
+}
diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/types.go b/vendor/github.com/dexidp/dex/storage/kubernetes/types.go
new file mode 100644
index 00000000..a5ec29af
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/kubernetes/types.go
@@ -0,0 +1,854 @@
+package kubernetes
+
+import (
+	"strings"
+	"time"
+
+	jose "gopkg.in/square/go-jose.v2"
+
+	"github.com/dexidp/dex/storage"
+	"github.com/dexidp/dex/storage/kubernetes/k8sapi"
+)
+
+const (
+	apiGroup = "dex.coreos.com"
+
+	legacyCRDAPIVersion = "apiextensions.k8s.io/v1beta1"
+	crdAPIVersion       = "apiextensions.k8s.io/v1"
+)
+
+// The set of custom resource definitions required by the storage. 
These are managed by +// the storage so it can migrate itself by creating new resources. +func customResourceDefinitions(apiVersion string) []k8sapi.CustomResourceDefinition { + crdMeta := k8sapi.TypeMeta{ + APIVersion: apiVersion, + Kind: "CustomResourceDefinition", + } + + var version string + var scope k8sapi.ResourceScope + var versions []k8sapi.CustomResourceDefinitionVersion + + switch apiVersion { + case crdAPIVersion: + preserveUnknownFields := true + versions = []k8sapi.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + Schema: &k8sapi.CustomResourceValidation{ + OpenAPIV3Schema: &k8sapi.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: &preserveUnknownFields, + }, + }, + }, + } + scope = k8sapi.NamespaceScoped + case legacyCRDAPIVersion: + version = "v1" + default: + panic("unknown apiVersion " + apiVersion) + } + + return []k8sapi.CustomResourceDefinition{ + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "authcodes.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "authcodes", + Singular: "authcode", + Kind: "AuthCode", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "authrequests.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "authrequests", + Singular: "authrequest", + Kind: "AuthRequest", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "oauth2clients.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "oauth2clients", + Singular: "oauth2client", + Kind: "OAuth2Client", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "signingkeies.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + // `signingkeies` is an artifact from the old TPR pluralization. + // Users don't directly interact with this value, hence leaving it + // as is. 
+ Plural: "signingkeies", + Singular: "signingkey", + Kind: "SigningKey", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "refreshtokens.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "refreshtokens", + Singular: "refreshtoken", + Kind: "RefreshToken", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "passwords.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "passwords", + Singular: "password", + Kind: "Password", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "offlinesessionses.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "offlinesessionses", + Singular: "offlinesessions", + Kind: "OfflineSessions", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "connectors.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "connectors", + Singular: "connector", + Kind: "Connector", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "devicerequests.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "devicerequests", + Singular: "devicerequest", + Kind: "DeviceRequest", + }, + }, + }, + { + ObjectMeta: k8sapi.ObjectMeta{ + Name: "devicetokens.dex.coreos.com", + }, + TypeMeta: crdMeta, + Spec: k8sapi.CustomResourceDefinitionSpec{ + Group: apiGroup, + Version: version, + Versions: versions, + Scope: scope, + Names: k8sapi.CustomResourceDefinitionNames{ + Plural: "devicetokens", + Singular: "devicetoken", + Kind: "DeviceToken", + }, + }, + }, + } +} + +// There will only ever be a single keys resource. Maintain this by setting a +// common name. +const keysName = "openid-connect-keys" + +// Client is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. +type Client struct { + // Name is a hash of the ID. + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + // ID is immutable, since it's a primary key and should not be changed. + ID string `json:"id,omitempty"` + + Secret string `json:"secret,omitempty"` + RedirectURIs []string `json:"redirectURIs,omitempty"` + TrustedPeers []string `json:"trustedPeers,omitempty"` + + Public bool `json:"public"` + + Name string `json:"name,omitempty"` + LogoURL string `json:"logoURL,omitempty"` +} + +// ClientList is a list of Clients. 
+type ClientList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + Clients []Client `json:"items"` +} + +func (cli *client) fromStorageClient(c storage.Client) Client { + return Client{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindClient, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: cli.idToName(c.ID), + Namespace: cli.namespace, + }, + ID: c.ID, + Secret: c.Secret, + RedirectURIs: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + Name: c.Name, + LogoURL: c.LogoURL, + } +} + +func toStorageClient(c Client) storage.Client { + return storage.Client{ + ID: c.ID, + Secret: c.Secret, + RedirectURIs: c.RedirectURIs, + TrustedPeers: c.TrustedPeers, + Public: c.Public, + Name: c.Name, + LogoURL: c.LogoURL, + } +} + +// Claims is a mirrored struct from storage with JSON struct tags. +type Claims struct { + UserID string `json:"userID"` + Username string `json:"username"` + PreferredUsername string `json:"preferredUsername"` + Email string `json:"email"` + EmailVerified bool `json:"emailVerified"` + Groups []string `json:"groups,omitempty"` +} + +func fromStorageClaims(i storage.Claims) Claims { + return Claims{ + UserID: i.UserID, + Username: i.Username, + PreferredUsername: i.PreferredUsername, + Email: i.Email, + EmailVerified: i.EmailVerified, + Groups: i.Groups, + } +} + +func toStorageClaims(i Claims) storage.Claims { + return storage.Claims{ + UserID: i.UserID, + Username: i.Username, + PreferredUsername: i.PreferredUsername, + Email: i.Email, + EmailVerified: i.EmailVerified, + Groups: i.Groups, + } +} + +// AuthRequest is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. +type AuthRequest struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + ClientID string `json:"clientID"` + ResponseTypes []string `json:"responseTypes,omitempty"` + Scopes []string `json:"scopes,omitempty"` + RedirectURI string `json:"redirectURI"` + + Nonce string `json:"nonce,omitempty"` + State string `json:"state,omitempty"` + + // The client has indicated that the end user must be shown an approval prompt + // on all requests. The server cannot cache their initial action for subsequent + // attempts. + ForceApprovalPrompt bool `json:"forceApprovalPrompt,omitempty"` + + LoggedIn bool `json:"loggedIn"` + + // The identity of the end user. Generally nil until the user authenticates + // with a backend. + Claims Claims `json:"claims,omitempty"` + // The connector used to login the user. Set when the user authenticates. + ConnectorID string `json:"connectorID,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` + + Expiry time.Time `json:"expiry"` + + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` + + HMACKey []byte `json:"hmac_key"` +} + +// AuthRequestList is a list of AuthRequests. 
+type AuthRequestList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + AuthRequests []AuthRequest `json:"items"` +} + +func toStorageAuthRequest(req AuthRequest) storage.AuthRequest { + a := storage.AuthRequest{ + ID: req.ObjectMeta.Name, + ClientID: req.ClientID, + ResponseTypes: req.ResponseTypes, + Scopes: req.Scopes, + RedirectURI: req.RedirectURI, + Nonce: req.Nonce, + State: req.State, + ForceApprovalPrompt: req.ForceApprovalPrompt, + LoggedIn: req.LoggedIn, + ConnectorID: req.ConnectorID, + ConnectorData: req.ConnectorData, + Expiry: req.Expiry, + Claims: toStorageClaims(req.Claims), + PKCE: storage.PKCE{ + CodeChallenge: req.CodeChallenge, + CodeChallengeMethod: req.CodeChallengeMethod, + }, + HMACKey: req.HMACKey, + } + return a +} + +func (cli *client) fromStorageAuthRequest(a storage.AuthRequest) AuthRequest { + req := AuthRequest{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindAuthRequest, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: a.ID, + Namespace: cli.namespace, + }, + ClientID: a.ClientID, + ResponseTypes: a.ResponseTypes, + Scopes: a.Scopes, + RedirectURI: a.RedirectURI, + Nonce: a.Nonce, + State: a.State, + LoggedIn: a.LoggedIn, + ForceApprovalPrompt: a.ForceApprovalPrompt, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Expiry: a.Expiry, + Claims: fromStorageClaims(a.Claims), + CodeChallenge: a.PKCE.CodeChallenge, + CodeChallengeMethod: a.PKCE.CodeChallengeMethod, + HMACKey: a.HMACKey, + } + return req +} + +// Password is a mirrored struct from the storage with JSON struct tags and +// Kubernetes type metadata. +type Password struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + // The Kubernetes name is actually an encoded version of this value. + // + // This field is IMMUTABLE. Do not change. + Email string `json:"email,omitempty"` + + Hash []byte `json:"hash,omitempty"` + Username string `json:"username,omitempty"` + UserID string `json:"userID,omitempty"` +} + +// PasswordList is a list of Passwords. +type PasswordList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + Passwords []Password `json:"items"` +} + +func (cli *client) fromStoragePassword(p storage.Password) Password { + email := strings.ToLower(p.Email) + return Password{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindPassword, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: cli.idToName(email), + Namespace: cli.namespace, + }, + Email: email, + Hash: p.Hash, + Username: p.Username, + UserID: p.UserID, + } +} + +func toStoragePassword(p Password) storage.Password { + return storage.Password{ + Email: p.Email, + Hash: p.Hash, + Username: p.Username, + UserID: p.UserID, + } +} + +// AuthCode is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. 
+type AuthCode struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + ClientID string `json:"clientID"` + Scopes []string `json:"scopes,omitempty"` + RedirectURI string `json:"redirectURI"` + + Nonce string `json:"nonce,omitempty"` + State string `json:"state,omitempty"` + + Claims Claims `json:"claims,omitempty"` + + ConnectorID string `json:"connectorID,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` + + Expiry time.Time `json:"expiry"` + + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` +} + +// AuthCodeList is a list of AuthCodes. +type AuthCodeList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + AuthCodes []AuthCode `json:"items"` +} + +func (cli *client) fromStorageAuthCode(a storage.AuthCode) AuthCode { + return AuthCode{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindAuthCode, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: a.ID, + Namespace: cli.namespace, + }, + ClientID: a.ClientID, + RedirectURI: a.RedirectURI, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Nonce: a.Nonce, + Scopes: a.Scopes, + Claims: fromStorageClaims(a.Claims), + Expiry: a.Expiry, + CodeChallenge: a.PKCE.CodeChallenge, + CodeChallengeMethod: a.PKCE.CodeChallengeMethod, + } +} + +func toStorageAuthCode(a AuthCode) storage.AuthCode { + return storage.AuthCode{ + ID: a.ObjectMeta.Name, + ClientID: a.ClientID, + RedirectURI: a.RedirectURI, + ConnectorID: a.ConnectorID, + ConnectorData: a.ConnectorData, + Nonce: a.Nonce, + Scopes: a.Scopes, + Claims: toStorageClaims(a.Claims), + Expiry: a.Expiry, + PKCE: storage.PKCE{ + CodeChallenge: a.CodeChallenge, + CodeChallengeMethod: a.CodeChallengeMethod, + }, + } +} + +// RefreshToken is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. +type RefreshToken struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + CreatedAt time.Time + LastUsed time.Time + + ClientID string `json:"clientID"` + Scopes []string `json:"scopes,omitempty"` + + Token string `json:"token,omitempty"` + ObsoleteToken string `json:"obsoleteToken,omitempty"` + + Nonce string `json:"nonce,omitempty"` + + Claims Claims `json:"claims,omitempty"` + ConnectorID string `json:"connectorID,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` +} + +// RefreshList is a list of refresh tokens. 
+type RefreshList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + RefreshTokens []RefreshToken `json:"items"` +} + +func toStorageRefreshToken(r RefreshToken) storage.RefreshToken { + return storage.RefreshToken{ + ID: r.ObjectMeta.Name, + Token: r.Token, + ObsoleteToken: r.ObsoleteToken, + CreatedAt: r.CreatedAt, + LastUsed: r.LastUsed, + ClientID: r.ClientID, + ConnectorID: r.ConnectorID, + ConnectorData: r.ConnectorData, + Scopes: r.Scopes, + Nonce: r.Nonce, + Claims: toStorageClaims(r.Claims), + } +} + +func (cli *client) fromStorageRefreshToken(r storage.RefreshToken) RefreshToken { + return RefreshToken{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindRefreshToken, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: r.ID, + Namespace: cli.namespace, + }, + Token: r.Token, + ObsoleteToken: r.ObsoleteToken, + CreatedAt: r.CreatedAt, + LastUsed: r.LastUsed, + ClientID: r.ClientID, + ConnectorID: r.ConnectorID, + ConnectorData: r.ConnectorData, + Scopes: r.Scopes, + Nonce: r.Nonce, + Claims: fromStorageClaims(r.Claims), + } +} + +// Keys is a mirrored struct from storage with JSON struct tags and Kubernetes +// type metadata. +type Keys struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + // Key for creating and verifying signatures. These may be nil. + SigningKey *jose.JSONWebKey `json:"signingKey,omitempty"` + SigningKeyPub *jose.JSONWebKey `json:"signingKeyPub,omitempty"` + // Old signing keys which have been rotated but can still be used to validate + // existing signatures. + VerificationKeys []storage.VerificationKey `json:"verificationKeys,omitempty"` + + // The next time the signing key will rotate. + // + // For caching purposes, implementations MUST NOT update keys before this time. + NextRotation time.Time `json:"nextRotation"` +} + +func (cli *client) fromStorageKeys(keys storage.Keys) Keys { + return Keys{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindKeys, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: keysName, + Namespace: cli.namespace, + }, + SigningKey: keys.SigningKey, + SigningKeyPub: keys.SigningKeyPub, + VerificationKeys: keys.VerificationKeys, + NextRotation: keys.NextRotation, + } +} + +func toStorageKeys(keys Keys) storage.Keys { + return storage.Keys{ + SigningKey: keys.SigningKey, + SigningKeyPub: keys.SigningKeyPub, + VerificationKeys: keys.VerificationKeys, + NextRotation: keys.NextRotation, + } +} + +// OfflineSessions is a mirrored struct from storage with JSON struct tags and Kubernetes +// type metadata. 
+type OfflineSessions struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + UserID string `json:"userID,omitempty"` + ConnID string `json:"connID,omitempty"` + Refresh map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"` + ConnectorData []byte `json:"connectorData,omitempty"` +} + +func (cli *client) fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions { + return OfflineSessions{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindOfflineSessions, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: cli.offlineTokenName(o.UserID, o.ConnID), + Namespace: cli.namespace, + }, + UserID: o.UserID, + ConnID: o.ConnID, + Refresh: o.Refresh, + ConnectorData: o.ConnectorData, + } +} + +func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions { + s := storage.OfflineSessions{ + UserID: o.UserID, + ConnID: o.ConnID, + Refresh: o.Refresh, + ConnectorData: o.ConnectorData, + } + if s.Refresh == nil { + // Server code assumes this will be non-nil. + s.Refresh = make(map[string]*storage.RefreshTokenRef) + } + return s +} + +// Connector is a mirrored struct from storage with JSON struct tags and Kubernetes +// type metadata. +type Connector struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + // Config holds connector specific configuration information + Config []byte `json:"config,omitempty"` +} + +func (cli *client) fromStorageConnector(c storage.Connector) Connector { + return Connector{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindConnector, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: c.ID, + Namespace: cli.namespace, + }, + ID: c.ID, + Type: c.Type, + Name: c.Name, + Config: c.Config, + } +} + +func toStorageConnector(c Connector) storage.Connector { + return storage.Connector{ + ID: c.ID, + Type: c.Type, + Name: c.Name, + ResourceVersion: c.ObjectMeta.ResourceVersion, + Config: c.Config, + } +} + +// ConnectorList is a list of Connectors. +type ConnectorList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + Connectors []Connector `json:"items"` +} + +// DeviceRequest is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. +type DeviceRequest struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + DeviceCode string `json:"device_code,omitempty"` + ClientID string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + Scopes []string `json:"scopes,omitempty"` + Expiry time.Time `json:"expiry"` +} + +// DeviceRequestList is a list of DeviceRequests. 
+type DeviceRequestList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + DeviceRequests []DeviceRequest `json:"items"` +} + +func (cli *client) fromStorageDeviceRequest(a storage.DeviceRequest) DeviceRequest { + req := DeviceRequest{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindDeviceRequest, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: strings.ToLower(a.UserCode), + Namespace: cli.namespace, + }, + DeviceCode: a.DeviceCode, + ClientID: a.ClientID, + ClientSecret: a.ClientSecret, + Scopes: a.Scopes, + Expiry: a.Expiry, + } + return req +} + +func toStorageDeviceRequest(req DeviceRequest) storage.DeviceRequest { + return storage.DeviceRequest{ + UserCode: strings.ToUpper(req.ObjectMeta.Name), + DeviceCode: req.DeviceCode, + ClientID: req.ClientID, + ClientSecret: req.ClientSecret, + Scopes: req.Scopes, + Expiry: req.Expiry, + } +} + +// DeviceToken is a mirrored struct from storage with JSON struct tags and +// Kubernetes type metadata. +type DeviceToken struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ObjectMeta `json:"metadata,omitempty"` + + Status string `json:"status,omitempty"` + Token string `json:"token,omitempty"` + Expiry time.Time `json:"expiry"` + LastRequestTime time.Time `json:"last_request"` + PollIntervalSeconds int `json:"poll_interval"` + CodeChallenge string `json:"code_challenge,omitempty"` + CodeChallengeMethod string `json:"code_challenge_method,omitempty"` +} + +// DeviceTokenList is a list of DeviceTokens. +type DeviceTokenList struct { + k8sapi.TypeMeta `json:",inline"` + k8sapi.ListMeta `json:"metadata,omitempty"` + DeviceTokens []DeviceToken `json:"items"` +} + +func (cli *client) fromStorageDeviceToken(t storage.DeviceToken) DeviceToken { + req := DeviceToken{ + TypeMeta: k8sapi.TypeMeta{ + Kind: kindDeviceToken, + APIVersion: cli.apiVersion, + }, + ObjectMeta: k8sapi.ObjectMeta{ + Name: t.DeviceCode, + Namespace: cli.namespace, + }, + Status: t.Status, + Token: t.Token, + Expiry: t.Expiry, + LastRequestTime: t.LastRequestTime, + PollIntervalSeconds: t.PollIntervalSeconds, + CodeChallenge: t.PKCE.CodeChallenge, + CodeChallengeMethod: t.PKCE.CodeChallengeMethod, + } + return req +} + +func toStorageDeviceToken(t DeviceToken) storage.DeviceToken { + return storage.DeviceToken{ + DeviceCode: t.ObjectMeta.Name, + Status: t.Status, + Token: t.Token, + Expiry: t.Expiry, + LastRequestTime: t.LastRequestTime, + PollIntervalSeconds: t.PollIntervalSeconds, + PKCE: storage.PKCE{ + CodeChallenge: t.CodeChallenge, + CodeChallengeMethod: t.CodeChallengeMethod, + }, + } +} diff --git a/vendor/github.com/dexidp/dex/storage/memory/BUILD b/vendor/github.com/dexidp/dex/storage/memory/BUILD new file mode 100644 index 00000000..8758439c --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/memory/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "memory", + srcs = ["memory.go"], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/memory", + importpath = "github.com/dexidp/dex/storage/memory", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/storage", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/memory/memory.go b/vendor/github.com/dexidp/dex/storage/memory/memory.go new file mode 100644 index 00000000..a9406657 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/memory/memory.go @@ -0,0 +1,540 @@ +// Package memory provides an in memory 
implementation of the storage interface. +package memory + +import ( + "strings" + "sync" + "time" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" +) + +// New returns an in memory storage. +func New(logger log.Logger) storage.Storage { + return &memStorage{ + clients: make(map[string]storage.Client), + authCodes: make(map[string]storage.AuthCode), + refreshTokens: make(map[string]storage.RefreshToken), + authReqs: make(map[string]storage.AuthRequest), + passwords: make(map[string]storage.Password), + offlineSessions: make(map[offlineSessionID]storage.OfflineSessions), + connectors: make(map[string]storage.Connector), + deviceRequests: make(map[string]storage.DeviceRequest), + deviceTokens: make(map[string]storage.DeviceToken), + logger: logger, + } +} + +// Config is an implementation of a storage configuration. +// +// TODO(ericchiang): Actually define a storage config interface and have registration. +type Config struct { // The in memory implementation has no config. +} + +// Open always returns a new in memory storage. +func (c *Config) Open(logger log.Logger) (storage.Storage, error) { + return New(logger), nil +} + +type memStorage struct { + mu sync.Mutex + + clients map[string]storage.Client + authCodes map[string]storage.AuthCode + refreshTokens map[string]storage.RefreshToken + authReqs map[string]storage.AuthRequest + passwords map[string]storage.Password + offlineSessions map[offlineSessionID]storage.OfflineSessions + connectors map[string]storage.Connector + deviceRequests map[string]storage.DeviceRequest + deviceTokens map[string]storage.DeviceToken + + keys storage.Keys + + logger log.Logger +} + +type offlineSessionID struct { + userID string + connID string +} + +func (s *memStorage) tx(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + f() +} + +func (s *memStorage) Close() error { return nil } + +func (s *memStorage) GarbageCollect(now time.Time) (result storage.GCResult, err error) { + s.tx(func() { + for id, a := range s.authCodes { + if now.After(a.Expiry) { + delete(s.authCodes, id) + result.AuthCodes++ + } + } + for id, a := range s.authReqs { + if now.After(a.Expiry) { + delete(s.authReqs, id) + result.AuthRequests++ + } + } + for id, a := range s.deviceRequests { + if now.After(a.Expiry) { + delete(s.deviceRequests, id) + result.DeviceRequests++ + } + } + for id, a := range s.deviceTokens { + if now.After(a.Expiry) { + delete(s.deviceTokens, id) + result.DeviceTokens++ + } + } + }) + return result, nil +} + +func (s *memStorage) CreateClient(c storage.Client) (err error) { + s.tx(func() { + if _, ok := s.clients[c.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.clients[c.ID] = c + } + }) + return +} + +func (s *memStorage) CreateAuthCode(c storage.AuthCode) (err error) { + s.tx(func() { + if _, ok := s.authCodes[c.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.authCodes[c.ID] = c + } + }) + return +} + +func (s *memStorage) CreateRefresh(r storage.RefreshToken) (err error) { + s.tx(func() { + if _, ok := s.refreshTokens[r.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.refreshTokens[r.ID] = r + } + }) + return +} + +func (s *memStorage) CreateAuthRequest(a storage.AuthRequest) (err error) { + s.tx(func() { + if _, ok := s.authReqs[a.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.authReqs[a.ID] = a + } + }) + return +} + +func (s *memStorage) CreatePassword(p storage.Password) (err error) { + lowerEmail := strings.ToLower(p.Email) + s.tx(func() { + if _, ok := s.passwords[lowerEmail]; ok { + err = 
storage.ErrAlreadyExists + } else { + s.passwords[lowerEmail] = p + } + }) + return +} + +func (s *memStorage) CreateOfflineSessions(o storage.OfflineSessions) (err error) { + id := offlineSessionID{ + userID: o.UserID, + connID: o.ConnID, + } + s.tx(func() { + if _, ok := s.offlineSessions[id]; ok { + err = storage.ErrAlreadyExists + } else { + s.offlineSessions[id] = o + } + }) + return +} + +func (s *memStorage) CreateConnector(connector storage.Connector) (err error) { + s.tx(func() { + if _, ok := s.connectors[connector.ID]; ok { + err = storage.ErrAlreadyExists + } else { + s.connectors[connector.ID] = connector + } + }) + return +} + +func (s *memStorage) GetAuthCode(id string) (c storage.AuthCode, err error) { + s.tx(func() { + var ok bool + if c, ok = s.authCodes[id]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) GetPassword(email string) (p storage.Password, err error) { + email = strings.ToLower(email) + s.tx(func() { + var ok bool + if p, ok = s.passwords[email]; !ok { + err = storage.ErrNotFound + } + }) + return +} + +func (s *memStorage) GetClient(id string) (client storage.Client, err error) { + s.tx(func() { + var ok bool + if client, ok = s.clients[id]; !ok { + err = storage.ErrNotFound + } + }) + return +} + +func (s *memStorage) GetKeys() (keys storage.Keys, err error) { + s.tx(func() { keys = s.keys }) + return +} + +func (s *memStorage) GetRefresh(id string) (tok storage.RefreshToken, err error) { + s.tx(func() { + var ok bool + if tok, ok = s.refreshTokens[id]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) GetAuthRequest(id string) (req storage.AuthRequest, err error) { + s.tx(func() { + var ok bool + if req, ok = s.authReqs[id]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) GetOfflineSessions(userID string, connID string) (o storage.OfflineSessions, err error) { + id := offlineSessionID{ + userID: userID, + connID: connID, + } + s.tx(func() { + var ok bool + if o, ok = s.offlineSessions[id]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) GetConnector(id string) (connector storage.Connector, err error) { + s.tx(func() { + var ok bool + if connector, ok = s.connectors[id]; !ok { + err = storage.ErrNotFound + } + }) + return +} + +func (s *memStorage) ListClients() (clients []storage.Client, err error) { + s.tx(func() { + for _, client := range s.clients { + clients = append(clients, client) + } + }) + return +} + +func (s *memStorage) ListRefreshTokens() (tokens []storage.RefreshToken, err error) { + s.tx(func() { + for _, refresh := range s.refreshTokens { + tokens = append(tokens, refresh) + } + }) + return +} + +func (s *memStorage) ListPasswords() (passwords []storage.Password, err error) { + s.tx(func() { + for _, password := range s.passwords { + passwords = append(passwords, password) + } + }) + return +} + +func (s *memStorage) ListConnectors() (conns []storage.Connector, err error) { + s.tx(func() { + for _, c := range s.connectors { + conns = append(conns, c) + } + }) + return +} + +func (s *memStorage) DeletePassword(email string) (err error) { + email = strings.ToLower(email) + s.tx(func() { + if _, ok := s.passwords[email]; !ok { + err = storage.ErrNotFound + return + } + delete(s.passwords, email) + }) + return +} + +func (s *memStorage) DeleteClient(id string) (err error) { + s.tx(func() { + if _, ok := s.clients[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.clients, id) + 
}) + return +} + +func (s *memStorage) DeleteRefresh(id string) (err error) { + s.tx(func() { + if _, ok := s.refreshTokens[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.refreshTokens, id) + }) + return +} + +func (s *memStorage) DeleteAuthCode(id string) (err error) { + s.tx(func() { + if _, ok := s.authCodes[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.authCodes, id) + }) + return +} + +func (s *memStorage) DeleteAuthRequest(id string) (err error) { + s.tx(func() { + if _, ok := s.authReqs[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.authReqs, id) + }) + return +} + +func (s *memStorage) DeleteOfflineSessions(userID string, connID string) (err error) { + id := offlineSessionID{ + userID: userID, + connID: connID, + } + s.tx(func() { + if _, ok := s.offlineSessions[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.offlineSessions, id) + }) + return +} + +func (s *memStorage) DeleteConnector(id string) (err error) { + s.tx(func() { + if _, ok := s.connectors[id]; !ok { + err = storage.ErrNotFound + return + } + delete(s.connectors, id) + }) + return +} + +func (s *memStorage) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) (err error) { + s.tx(func() { + client, ok := s.clients[id] + if !ok { + err = storage.ErrNotFound + return + } + if client, err = updater(client); err == nil { + s.clients[id] = client + } + }) + return +} + +func (s *memStorage) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) (err error) { + s.tx(func() { + var keys storage.Keys + if keys, err = updater(s.keys); err == nil { + s.keys = keys + } + }) + return +} + +func (s *memStorage) UpdateAuthRequest(id string, updater func(old storage.AuthRequest) (storage.AuthRequest, error)) (err error) { + s.tx(func() { + req, ok := s.authReqs[id] + if !ok { + err = storage.ErrNotFound + return + } + if req, err = updater(req); err == nil { + s.authReqs[id] = req + } + }) + return +} + +func (s *memStorage) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) (err error) { + email = strings.ToLower(email) + s.tx(func() { + req, ok := s.passwords[email] + if !ok { + err = storage.ErrNotFound + return + } + if req, err = updater(req); err == nil { + s.passwords[email] = req + } + }) + return +} + +func (s *memStorage) UpdateRefreshToken(id string, updater func(p storage.RefreshToken) (storage.RefreshToken, error)) (err error) { + s.tx(func() { + r, ok := s.refreshTokens[id] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.refreshTokens[id] = r + } + }) + return +} + +func (s *memStorage) UpdateOfflineSessions(userID string, connID string, updater func(o storage.OfflineSessions) (storage.OfflineSessions, error)) (err error) { + id := offlineSessionID{ + userID: userID, + connID: connID, + } + s.tx(func() { + r, ok := s.offlineSessions[id] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.offlineSessions[id] = r + } + }) + return +} + +func (s *memStorage) UpdateConnector(id string, updater func(c storage.Connector) (storage.Connector, error)) (err error) { + s.tx(func() { + r, ok := s.connectors[id] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.connectors[id] = r + } + }) + return +} + +func (s *memStorage) CreateDeviceRequest(d storage.DeviceRequest) (err error) { + s.tx(func() { + if _, ok := s.deviceRequests[d.UserCode]; ok { + err = 
storage.ErrAlreadyExists + } else { + s.deviceRequests[d.UserCode] = d + } + }) + return +} + +func (s *memStorage) GetDeviceRequest(userCode string) (req storage.DeviceRequest, err error) { + s.tx(func() { + var ok bool + if req, ok = s.deviceRequests[userCode]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) CreateDeviceToken(t storage.DeviceToken) (err error) { + s.tx(func() { + if _, ok := s.deviceTokens[t.DeviceCode]; ok { + err = storage.ErrAlreadyExists + } else { + s.deviceTokens[t.DeviceCode] = t + } + }) + return +} + +func (s *memStorage) GetDeviceToken(deviceCode string) (t storage.DeviceToken, err error) { + s.tx(func() { + var ok bool + if t, ok = s.deviceTokens[deviceCode]; !ok { + err = storage.ErrNotFound + return + } + }) + return +} + +func (s *memStorage) UpdateDeviceToken(deviceCode string, updater func(p storage.DeviceToken) (storage.DeviceToken, error)) (err error) { + s.tx(func() { + r, ok := s.deviceTokens[deviceCode] + if !ok { + err = storage.ErrNotFound + return + } + if r, err = updater(r); err == nil { + s.deviceTokens[deviceCode] = r + } + }) + return +} diff --git a/vendor/github.com/dexidp/dex/storage/sql/BUILD b/vendor/github.com/dexidp/dex/storage/sql/BUILD new file mode 100644 index 00000000..560470dd --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/sql/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sql", + srcs = [ + "config.go", + "crud.go", + "migrate.go", + "sql.go", + "sqlite.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/storage/sql", + importpath = "github.com/dexidp/dex/storage/sql", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/dexidp/dex/pkg/log", + "//vendor/github.com/dexidp/dex/storage", + "//vendor/github.com/go-sql-driver/mysql", + "//vendor/github.com/lib/pq", + "//vendor/github.com/mattn/go-sqlite3", + ], +) diff --git a/vendor/github.com/dexidp/dex/storage/sql/config.go b/vendor/github.com/dexidp/dex/storage/sql/config.go new file mode 100644 index 00000000..8b782425 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/sql/config.go @@ -0,0 +1,344 @@ +package sql + +import ( + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "net" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/lib/pq" + + "github.com/dexidp/dex/pkg/log" + "github.com/dexidp/dex/storage" +) + +const ( + // postgres error codes + pgErrUniqueViolation = "23505" // unique_violation +) + +const ( + // MySQL error codes + mysqlErrDupEntry = 1062 + mysqlErrDupEntryWithKeyName = 1586 + mysqlErrUnknownSysVar = 1193 +) + +//nolint +const ( + // postgres SSL modes + pgSSLDisable = "disable" + pgSSLRequire = "require" + pgSSLVerifyCA = "verify-ca" + pgSSLVerifyFull = "verify-full" +) + +//nolint +const ( + // MySQL SSL modes + mysqlSSLTrue = "true" + mysqlSSLFalse = "false" + mysqlSSLSkipVerify = "skip-verify" + mysqlSSLCustom = "custom" +) + +// NetworkDB contains options common to SQL databases accessed over network. 
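+//
+// Illustration only (not part of dex): the fields below are typically embedded
+// into a driver-specific config such as Postgres, declared further down.
+//
+//	p := Postgres{
+//		NetworkDB: NetworkDB{
+//			Database: "dex",
+//			User:     "dex",
+//			Host:     "localhost",
+//			Port:     5432,
+//		},
+//		SSL: SSL{Mode: pgSSLVerifyFull},
+//	}
+//	s, err := p.Open(logger) // logger supplied by the caller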
+type NetworkDB struct { + Database string + User string + Password string + Host string + Port uint16 + + ConnectionTimeout int // Seconds + + // database/sql tunables, see + // https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime and below + // Note: defaults will be set if these are 0 + MaxOpenConns int // default: 5 + MaxIdleConns int // default: 5 + ConnMaxLifetime int // Seconds, default: not set +} + +// SSL represents SSL options for network databases. +type SSL struct { + Mode string + CAFile string + // Files for client auth. + KeyFile string + CertFile string +} + +// Postgres options for creating an SQL db. +type Postgres struct { + NetworkDB + + SSL SSL `json:"ssl" yaml:"ssl"` +} + +// Open creates a new storage implementation backed by Postgres. +func (p *Postgres) Open(logger log.Logger) (storage.Storage, error) { + conn, err := p.open(logger) + if err != nil { + return nil, err + } + return conn, nil +} + +var strEsc = regexp.MustCompile(`([\\'])`) + +func dataSourceStr(str string) string { + return "'" + strEsc.ReplaceAllString(str, `\$1`) + "'" +} + +// createDataSourceName takes the configuration provided via the Postgres +// struct to create a data-source name that Go's database/sql package can +// make use of. +func (p *Postgres) createDataSourceName() string { + parameters := []string{} + + addParam := func(key, val string) { + parameters = append(parameters, fmt.Sprintf("%s=%s", key, val)) + } + + addParam("connect_timeout", strconv.Itoa(p.ConnectionTimeout)) + + // detect host:port for backwards-compatibility + host, port, err := net.SplitHostPort(p.Host) + if err != nil { + // not host:port, probably unix socket or bare address + + host = p.Host + + if p.Port != 0 { + port = strconv.Itoa(int(p.Port)) + } + } + + if host != "" { + addParam("host", dataSourceStr(host)) + } + + if port != "" { + addParam("port", port) + } + + if p.User != "" { + addParam("user", dataSourceStr(p.User)) + } + + if p.Password != "" { + addParam("password", dataSourceStr(p.Password)) + } + + if p.Database != "" { + addParam("dbname", dataSourceStr(p.Database)) + } + + if p.SSL.Mode == "" { + // Assume the strictest mode if unspecified. + addParam("sslmode", dataSourceStr(pgSSLVerifyFull)) + } else { + addParam("sslmode", dataSourceStr(p.SSL.Mode)) + } + + if p.SSL.CAFile != "" { + addParam("sslrootcert", dataSourceStr(p.SSL.CAFile)) + } + + if p.SSL.CertFile != "" { + addParam("sslcert", dataSourceStr(p.SSL.CertFile)) + } + + if p.SSL.KeyFile != "" { + addParam("sslkey", dataSourceStr(p.SSL.KeyFile)) + } + + return strings.Join(parameters, " ") +} + +func (p *Postgres) open(logger log.Logger) (*conn, error) { + dataSourceName := p.createDataSourceName() + + db, err := sql.Open("postgres", dataSourceName) + if err != nil { + return nil, err + } + + // set database/sql tunables if configured + if p.ConnMaxLifetime != 0 { + db.SetConnMaxLifetime(time.Duration(p.ConnMaxLifetime) * time.Second) + } + + if p.MaxIdleConns == 0 { + db.SetMaxIdleConns(5) + } else { + db.SetMaxIdleConns(p.MaxIdleConns) + } + + if p.MaxOpenConns == 0 { + db.SetMaxOpenConns(5) + } else { + db.SetMaxOpenConns(p.MaxOpenConns) + } + + errCheck := func(err error) bool { + sqlErr, ok := err.(*pq.Error) + if !ok { + return false + } + return sqlErr.Code == pgErrUniqueViolation + } + + c := &conn{db, &flavorPostgres, logger, errCheck} + if _, err := c.migrate(); err != nil { + return nil, fmt.Errorf("failed to perform migrations: %v", err) + } + return c, nil +} + +// MySQL options for creating a MySQL db. 
+type MySQL struct { + NetworkDB + + SSL SSL `json:"ssl" yaml:"ssl"` + + // TODO(pborzenkov): used by tests to reduce lock wait timeout. Should + // we make it exported and allow users to provide arbitrary params? + params map[string]string +} + +// Open creates a new storage implementation backed by MySQL. +func (s *MySQL) Open(logger log.Logger) (storage.Storage, error) { + conn, err := s.open(logger) + if err != nil { + return nil, err + } + return conn, nil +} + +func (s *MySQL) open(logger log.Logger) (*conn, error) { + cfg := mysql.Config{ + User: s.User, + Passwd: s.Password, + DBName: s.Database, + AllowNativePasswords: true, + + Timeout: time.Second * time.Duration(s.ConnectionTimeout), + + ParseTime: true, + Params: map[string]string{ + "transaction_isolation": "'SERIALIZABLE'", + }, + } + if s.Host != "" { + if s.Host[0] != '/' { + cfg.Net = "tcp" + cfg.Addr = s.Host + + if s.Port != 0 { + cfg.Addr = net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))) + } + } else { + cfg.Net = "unix" + cfg.Addr = s.Host + } + } + + switch { + case s.SSL.CAFile != "" || s.SSL.CertFile != "" || s.SSL.KeyFile != "": + if err := s.makeTLSConfig(); err != nil { + return nil, fmt.Errorf("failed to make TLS config: %v", err) + } + cfg.TLSConfig = mysqlSSLCustom + case s.SSL.Mode == "": + cfg.TLSConfig = mysqlSSLTrue + default: + cfg.TLSConfig = s.SSL.Mode + } + + for k, v := range s.params { + cfg.Params[k] = v + } + + db, err := sql.Open("mysql", cfg.FormatDSN()) + if err != nil { + return nil, err + } + + if s.MaxIdleConns == 0 { + /*Override default behaviour to fix https://github.com/dexidp/dex/issues/1608*/ + db.SetMaxIdleConns(0) + } else { + db.SetMaxIdleConns(s.MaxIdleConns) + } + + err = db.Ping() + if err != nil { + if mysqlErr, ok := err.(*mysql.MySQLError); ok && mysqlErr.Number == mysqlErrUnknownSysVar { + logger.Info("reconnecting with MySQL pre-5.7.20 compatibility mode") + + // MySQL 5.7.20 introduced transaction_isolation and deprecated tx_isolation. + // MySQL 8.0 doesn't have tx_isolation at all. 
+ // https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation + delete(cfg.Params, "transaction_isolation") + cfg.Params["tx_isolation"] = "'SERIALIZABLE'" + + db, err = sql.Open("mysql", cfg.FormatDSN()) + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + + errCheck := func(err error) bool { + sqlErr, ok := err.(*mysql.MySQLError) + if !ok { + return false + } + return sqlErr.Number == mysqlErrDupEntry || + sqlErr.Number == mysqlErrDupEntryWithKeyName + } + + c := &conn{db, &flavorMySQL, logger, errCheck} + if _, err := c.migrate(); err != nil { + return nil, fmt.Errorf("failed to perform migrations: %v", err) + } + return c, nil +} + +func (s *MySQL) makeTLSConfig() error { + cfg := &tls.Config{} + if s.SSL.CAFile != "" { + rootCertPool := x509.NewCertPool() + pem, err := os.ReadFile(s.SSL.CAFile) + if err != nil { + return err + } + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return fmt.Errorf("failed to append PEM") + } + cfg.RootCAs = rootCertPool + } + if s.SSL.CertFile != "" && s.SSL.KeyFile != "" { + clientCert := make([]tls.Certificate, 0, 1) + certs, err := tls.LoadX509KeyPair(s.SSL.CertFile, s.SSL.KeyFile) + if err != nil { + return err + } + clientCert = append(clientCert, certs) + cfg.Certificates = clientCert + } + + mysql.RegisterTLSConfig(mysqlSSLCustom, cfg) + return nil +} diff --git a/vendor/github.com/dexidp/dex/storage/sql/crud.go b/vendor/github.com/dexidp/dex/storage/sql/crud.go new file mode 100644 index 00000000..7f8666db --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/sql/crud.go @@ -0,0 +1,1020 @@ +package sql + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/dexidp/dex/storage" +) + +// TODO(ericchiang): The update, insert, and select methods queries are all +// very repetitive. Consider creating them programmatically. + +// keysRowID is the ID of the only row we expect to populate the "keys" table. +const keysRowID = "keys" + +// encoder wraps the underlying value in a JSON marshaler which is automatically +// called by the database/sql package. +// +// s := []string{"planes", "bears"} +// err := db.Exec(`insert into t1 (id, things) values (1, $1)`, encoder(s)) +// if err != nil { +// // handle error +// } +// +// var r []byte +// err = db.QueryRow(`select things from t1 where id = 1;`).Scan(&r) +// if err != nil { +// // handle error +// } +// fmt.Printf("%s\n", r) // ["planes","bears"] +func encoder(i interface{}) driver.Valuer { + return jsonEncoder{i} +} + +// decoder wraps the underlying value in a JSON unmarshaler which can then be passed +// to a database Scan() method. +func decoder(i interface{}) sql.Scanner { + return jsonDecoder{i} +} + +type jsonEncoder struct { + i interface{} +} + +func (j jsonEncoder) Value() (driver.Value, error) { + b, err := json.Marshal(j.i) + if err != nil { + return nil, fmt.Errorf("marshal: %v", err) + } + return b, nil +} + +type jsonDecoder struct { + i interface{} +} + +func (j jsonDecoder) Scan(dest interface{}) error { + if dest == nil { + return errors.New("nil value") + } + b, ok := dest.([]byte) + if !ok { + return fmt.Errorf("expected []byte got %T", dest) + } + if err := json.Unmarshal(b, &j.i); err != nil { + return fmt.Errorf("unmarshal: %v", err) + } + return nil +} + +// Abstract conn vs trans. +type querier interface { + QueryRow(query string, args ...interface{}) *sql.Row +} + +// Abstract row vs rows. 
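+// Editor's note: both *sql.Row and *sql.Rows satisfy this interface, so the
+// scanX helpers below (scanRefresh, scanClient, ...) can back a single-row
+// Get as well as a multi-row List without duplicating the column ordering.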
+type scanner interface { + Scan(dest ...interface{}) error +} + +func (c *conn) GarbageCollect(now time.Time) (storage.GCResult, error) { + result := storage.GCResult{} + + r, err := c.Exec(`delete from auth_request where expiry < $1`, now) + if err != nil { + return result, fmt.Errorf("gc auth_request: %v", err) + } + if n, err := r.RowsAffected(); err == nil { + result.AuthRequests = n + } + + r, err = c.Exec(`delete from auth_code where expiry < $1`, now) + if err != nil { + return result, fmt.Errorf("gc auth_code: %v", err) + } + if n, err := r.RowsAffected(); err == nil { + result.AuthCodes = n + } + + r, err = c.Exec(`delete from device_request where expiry < $1`, now) + if err != nil { + return result, fmt.Errorf("gc device_request: %v", err) + } + if n, err := r.RowsAffected(); err == nil { + result.DeviceRequests = n + } + + r, err = c.Exec(`delete from device_token where expiry < $1`, now) + if err != nil { + return result, fmt.Errorf("gc device_token: %v", err) + } + if n, err := r.RowsAffected(); err == nil { + result.DeviceTokens = n + } + + return result, err +} + +func (c *conn) CreateAuthRequest(a storage.AuthRequest) error { + _, err := c.Exec(` + insert into auth_request ( + id, client_id, response_types, scopes, redirect_uri, nonce, state, + force_approval_prompt, logged_in, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, + expiry, + code_challenge, code_challenge_method, + hmac_key + ) + values ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21 + ); + `, + a.ID, a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State, + a.ForceApprovalPrompt, a.LoggedIn, + a.Claims.UserID, a.Claims.Username, a.Claims.PreferredUsername, + a.Claims.Email, a.Claims.EmailVerified, encoder(a.Claims.Groups), + a.ConnectorID, a.ConnectorData, + a.Expiry, + a.PKCE.CodeChallenge, a.PKCE.CodeChallengeMethod, + a.HMACKey, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert auth request: %v", err) + } + return nil +} + +func (c *conn) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { + return c.ExecTx(func(tx *trans) error { + r, err := getAuthRequest(tx, id) + if err != nil { + return err + } + + a, err := updater(r) + if err != nil { + return err + } + _, err = tx.Exec(` + update auth_request + set + client_id = $1, response_types = $2, scopes = $3, redirect_uri = $4, + nonce = $5, state = $6, force_approval_prompt = $7, logged_in = $8, + claims_user_id = $9, claims_username = $10, claims_preferred_username = $11, + claims_email = $12, claims_email_verified = $13, + claims_groups = $14, + connector_id = $15, connector_data = $16, + expiry = $17, + code_challenge = $18, code_challenge_method = $19, + hmac_key = $20 + where id = $21; + `, + a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State, + a.ForceApprovalPrompt, a.LoggedIn, + a.Claims.UserID, a.Claims.Username, a.Claims.PreferredUsername, + a.Claims.Email, a.Claims.EmailVerified, + encoder(a.Claims.Groups), + a.ConnectorID, a.ConnectorData, + a.Expiry, + a.PKCE.CodeChallenge, a.PKCE.CodeChallengeMethod, a.HMACKey, + r.ID, + ) + if err != nil { + return fmt.Errorf("update auth request: %v", err) + } + return nil + }) +} + +func (c *conn) GetAuthRequest(id string) (storage.AuthRequest, error) { + return 
getAuthRequest(c, id) +} + +func getAuthRequest(q querier, id string) (a storage.AuthRequest, err error) { + err = q.QueryRow(` + select + id, client_id, response_types, scopes, redirect_uri, nonce, state, + force_approval_prompt, logged_in, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, expiry, + code_challenge, code_challenge_method, hmac_key + from auth_request where id = $1; + `, id).Scan( + &a.ID, &a.ClientID, decoder(&a.ResponseTypes), decoder(&a.Scopes), &a.RedirectURI, &a.Nonce, &a.State, + &a.ForceApprovalPrompt, &a.LoggedIn, + &a.Claims.UserID, &a.Claims.Username, &a.Claims.PreferredUsername, + &a.Claims.Email, &a.Claims.EmailVerified, + decoder(&a.Claims.Groups), + &a.ConnectorID, &a.ConnectorData, &a.Expiry, + &a.PKCE.CodeChallenge, &a.PKCE.CodeChallengeMethod, &a.HMACKey, + ) + if err != nil { + if err == sql.ErrNoRows { + return a, storage.ErrNotFound + } + return a, fmt.Errorf("select auth request: %v", err) + } + return a, nil +} + +func (c *conn) CreateAuthCode(a storage.AuthCode) error { + _, err := c.Exec(` + insert into auth_code ( + id, client_id, scopes, nonce, redirect_uri, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, + expiry, + code_challenge, code_challenge_method + ) + values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16); + `, + a.ID, a.ClientID, encoder(a.Scopes), a.Nonce, a.RedirectURI, a.Claims.UserID, + a.Claims.Username, a.Claims.PreferredUsername, a.Claims.Email, a.Claims.EmailVerified, + encoder(a.Claims.Groups), a.ConnectorID, a.ConnectorData, a.Expiry, + a.PKCE.CodeChallenge, a.PKCE.CodeChallengeMethod, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert auth code: %v", err) + } + return nil +} + +func (c *conn) GetAuthCode(id string) (a storage.AuthCode, err error) { + err = c.QueryRow(` + select + id, client_id, scopes, nonce, redirect_uri, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, + expiry, + code_challenge, code_challenge_method + from auth_code where id = $1; + `, id).Scan( + &a.ID, &a.ClientID, decoder(&a.Scopes), &a.Nonce, &a.RedirectURI, &a.Claims.UserID, + &a.Claims.Username, &a.Claims.PreferredUsername, &a.Claims.Email, &a.Claims.EmailVerified, + decoder(&a.Claims.Groups), &a.ConnectorID, &a.ConnectorData, &a.Expiry, + &a.PKCE.CodeChallenge, &a.PKCE.CodeChallengeMethod, + ) + if err != nil { + if err == sql.ErrNoRows { + return a, storage.ErrNotFound + } + return a, fmt.Errorf("select auth code: %v", err) + } + return a, nil +} + +func (c *conn) CreateRefresh(r storage.RefreshToken) error { + _, err := c.Exec(` + insert into refresh_token ( + id, client_id, scopes, nonce, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, + token, obsolete_token, created_at, last_used + ) + values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16); + `, + r.ID, r.ClientID, encoder(r.Scopes), r.Nonce, + r.Claims.UserID, r.Claims.Username, r.Claims.PreferredUsername, + r.Claims.Email, r.Claims.EmailVerified, + encoder(r.Claims.Groups), + r.ConnectorID, r.ConnectorData, + r.Token, r.ObsoleteToken, r.CreatedAt, r.LastUsed, + ) + if err != nil { + if 
c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert refresh_token: %v", err) + } + return nil +} + +func (c *conn) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { + return c.ExecTx(func(tx *trans) error { + r, err := getRefresh(tx, id) + if err != nil { + return err + } + if r, err = updater(r); err != nil { + return err + } + _, err = tx.Exec(` + update refresh_token + set + client_id = $1, + scopes = $2, + nonce = $3, + claims_user_id = $4, + claims_username = $5, + claims_preferred_username = $6, + claims_email = $7, + claims_email_verified = $8, + claims_groups = $9, + connector_id = $10, + connector_data = $11, + token = $12, + obsolete_token = $13, + created_at = $14, + last_used = $15 + where + id = $16 + `, + r.ClientID, encoder(r.Scopes), r.Nonce, + r.Claims.UserID, r.Claims.Username, r.Claims.PreferredUsername, + r.Claims.Email, r.Claims.EmailVerified, + encoder(r.Claims.Groups), + r.ConnectorID, r.ConnectorData, + r.Token, r.ObsoleteToken, r.CreatedAt, r.LastUsed, id, + ) + if err != nil { + return fmt.Errorf("update refresh token: %v", err) + } + return nil + }) +} + +func (c *conn) GetRefresh(id string) (storage.RefreshToken, error) { + return getRefresh(c, id) +} + +func getRefresh(q querier, id string) (storage.RefreshToken, error) { + return scanRefresh(q.QueryRow(` + select + id, client_id, scopes, nonce, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, + claims_groups, + connector_id, connector_data, + token, obsolete_token, created_at, last_used + from refresh_token where id = $1; + `, id)) +} + +func (c *conn) ListRefreshTokens() ([]storage.RefreshToken, error) { + rows, err := c.Query(` + select + id, client_id, scopes, nonce, + claims_user_id, claims_username, claims_preferred_username, + claims_email, claims_email_verified, claims_groups, + connector_id, connector_data, + token, obsolete_token, created_at, last_used + from refresh_token; + `) + if err != nil { + return nil, fmt.Errorf("query: %v", err) + } + defer rows.Close() + + var tokens []storage.RefreshToken + for rows.Next() { + r, err := scanRefresh(rows) + if err != nil { + return nil, err + } + tokens = append(tokens, r) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("scan: %v", err) + } + return tokens, nil +} + +func scanRefresh(s scanner) (r storage.RefreshToken, err error) { + err = s.Scan( + &r.ID, &r.ClientID, decoder(&r.Scopes), &r.Nonce, + &r.Claims.UserID, &r.Claims.Username, &r.Claims.PreferredUsername, + &r.Claims.Email, &r.Claims.EmailVerified, + decoder(&r.Claims.Groups), + &r.ConnectorID, &r.ConnectorData, + &r.Token, &r.ObsoleteToken, &r.CreatedAt, &r.LastUsed, + ) + if err != nil { + if err == sql.ErrNoRows { + return r, storage.ErrNotFound + } + return r, fmt.Errorf("scan refresh_token: %v", err) + } + return r, nil +} + +func (c *conn) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { + return c.ExecTx(func(tx *trans) error { + firstUpdate := false + // TODO(ericchiang): errors may cause a transaction be rolled back by the SQL + // server. Test this, and consider adding a COUNT() command beforehand. 
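+		// Editor's note: UpdateKeys is effectively an upsert. The first call
+		// finds no "keys" row (getKeys returns storage.ErrNotFound), so the
+		// updater runs against a zero-value storage.Keys and the result is
+		// inserted under keysRowID; every later call takes the update branch.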
+ old, err := getKeys(tx) + if err != nil { + if err != storage.ErrNotFound { + return fmt.Errorf("get keys: %v", err) + } + firstUpdate = true + old = storage.Keys{} + } + + nk, err := updater(old) + if err != nil { + return err + } + + if firstUpdate { + _, err = tx.Exec(` + insert into keys ( + id, verification_keys, signing_key, signing_key_pub, next_rotation + ) + values ($1, $2, $3, $4, $5); + `, + keysRowID, encoder(nk.VerificationKeys), encoder(nk.SigningKey), + encoder(nk.SigningKeyPub), nk.NextRotation, + ) + if err != nil { + return fmt.Errorf("insert: %v", err) + } + } else { + _, err = tx.Exec(` + update keys + set + verification_keys = $1, + signing_key = $2, + signing_key_pub = $3, + next_rotation = $4 + where id = $5; + `, + encoder(nk.VerificationKeys), encoder(nk.SigningKey), + encoder(nk.SigningKeyPub), nk.NextRotation, keysRowID, + ) + if err != nil { + return fmt.Errorf("update: %v", err) + } + } + return nil + }) +} + +func (c *conn) GetKeys() (keys storage.Keys, err error) { + return getKeys(c) +} + +func getKeys(q querier) (keys storage.Keys, err error) { + err = q.QueryRow(` + select + verification_keys, signing_key, signing_key_pub, next_rotation + from keys + where id=$1 + `, keysRowID).Scan( + decoder(&keys.VerificationKeys), decoder(&keys.SigningKey), + decoder(&keys.SigningKeyPub), &keys.NextRotation, + ) + if err != nil { + if err == sql.ErrNoRows { + return keys, storage.ErrNotFound + } + return keys, fmt.Errorf("query keys: %v", err) + } + return keys, nil +} + +func (c *conn) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { + return c.ExecTx(func(tx *trans) error { + cli, err := getClient(tx, id) + if err != nil { + return err + } + nc, err := updater(cli) + if err != nil { + return err + } + + _, err = tx.Exec(` + update client + set + secret = $1, + redirect_uris = $2, + trusted_peers = $3, + public = $4, + name = $5, + logo_url = $6 + where id = $7; + `, nc.Secret, encoder(nc.RedirectURIs), encoder(nc.TrustedPeers), nc.Public, nc.Name, nc.LogoURL, id, + ) + if err != nil { + return fmt.Errorf("update client: %v", err) + } + return nil + }) +} + +func (c *conn) CreateClient(cli storage.Client) error { + _, err := c.Exec(` + insert into client ( + id, secret, redirect_uris, trusted_peers, public, name, logo_url + ) + values ($1, $2, $3, $4, $5, $6, $7); + `, + cli.ID, cli.Secret, encoder(cli.RedirectURIs), encoder(cli.TrustedPeers), + cli.Public, cli.Name, cli.LogoURL, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert client: %v", err) + } + return nil +} + +func getClient(q querier, id string) (storage.Client, error) { + return scanClient(q.QueryRow(` + select + id, secret, redirect_uris, trusted_peers, public, name, logo_url + from client where id = $1; + `, id)) +} + +func (c *conn) GetClient(id string) (storage.Client, error) { + return getClient(c, id) +} + +func (c *conn) ListClients() ([]storage.Client, error) { + rows, err := c.Query(` + select + id, secret, redirect_uris, trusted_peers, public, name, logo_url + from client; + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var clients []storage.Client + for rows.Next() { + cli, err := scanClient(rows) + if err != nil { + return nil, err + } + clients = append(clients, cli) + } + if err := rows.Err(); err != nil { + return nil, err + } + return clients, nil +} + +func scanClient(s scanner) (cli storage.Client, err error) { + err = s.Scan( + &cli.ID, &cli.Secret, 
decoder(&cli.RedirectURIs), decoder(&cli.TrustedPeers), + &cli.Public, &cli.Name, &cli.LogoURL, + ) + if err != nil { + if err == sql.ErrNoRows { + return cli, storage.ErrNotFound + } + return cli, fmt.Errorf("get client: %v", err) + } + return cli, nil +} + +func (c *conn) CreatePassword(p storage.Password) error { + p.Email = strings.ToLower(p.Email) + _, err := c.Exec(` + insert into password ( + email, hash, username, user_id + ) + values ( + $1, $2, $3, $4 + ); + `, + p.Email, p.Hash, p.Username, p.UserID, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert password: %v", err) + } + return nil +} + +func (c *conn) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) error { + return c.ExecTx(func(tx *trans) error { + p, err := getPassword(tx, email) + if err != nil { + return err + } + + np, err := updater(p) + if err != nil { + return err + } + _, err = tx.Exec(` + update password + set + hash = $1, username = $2, user_id = $3 + where email = $4; + `, + np.Hash, np.Username, np.UserID, p.Email, + ) + if err != nil { + return fmt.Errorf("update password: %v", err) + } + return nil + }) +} + +func (c *conn) GetPassword(email string) (storage.Password, error) { + return getPassword(c, email) +} + +func getPassword(q querier, email string) (p storage.Password, err error) { + return scanPassword(q.QueryRow(` + select + email, hash, username, user_id + from password where email = $1; + `, strings.ToLower(email))) +} + +func (c *conn) ListPasswords() ([]storage.Password, error) { + rows, err := c.Query(` + select + email, hash, username, user_id + from password; + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var passwords []storage.Password + for rows.Next() { + p, err := scanPassword(rows) + if err != nil { + return nil, err + } + passwords = append(passwords, p) + } + if err := rows.Err(); err != nil { + return nil, err + } + return passwords, nil +} + +func scanPassword(s scanner) (p storage.Password, err error) { + err = s.Scan( + &p.Email, &p.Hash, &p.Username, &p.UserID, + ) + if err != nil { + if err == sql.ErrNoRows { + return p, storage.ErrNotFound + } + return p, fmt.Errorf("select password: %v", err) + } + return p, nil +} + +func (c *conn) CreateOfflineSessions(s storage.OfflineSessions) error { + _, err := c.Exec(` + insert into offline_session ( + user_id, conn_id, refresh, connector_data + ) + values ( + $1, $2, $3, $4 + ); + `, + s.UserID, s.ConnID, encoder(s.Refresh), s.ConnectorData, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert offline session: %v", err) + } + return nil +} + +func (c *conn) UpdateOfflineSessions(userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { + return c.ExecTx(func(tx *trans) error { + s, err := getOfflineSessions(tx, userID, connID) + if err != nil { + return err + } + + newSession, err := updater(s) + if err != nil { + return err + } + _, err = tx.Exec(` + update offline_session + set + refresh = $1, + connector_data = $2 + where user_id = $3 AND conn_id = $4; + `, + encoder(newSession.Refresh), newSession.ConnectorData, s.UserID, s.ConnID, + ) + if err != nil { + return fmt.Errorf("update offline session: %v", err) + } + return nil + }) +} + +func (c *conn) GetOfflineSessions(userID string, connID string) (storage.OfflineSessions, error) { + return getOfflineSessions(c, userID, connID) +} 
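+
+// Editor's note: an illustrative sketch (not part of upstream dex) of how the
+// Update* methods above are meant to be used through the storage.Storage
+// interface. The updater mutates the fetched value in place and the whole
+// read-modify-write runs inside ExecTx; newData is a hypothetical value:
+//
+//	err := st.UpdateOfflineSessions(userID, connID,
+//		func(o storage.OfflineSessions) (storage.OfflineSessions, error) {
+//			o.ConnectorData = newData
+//			return o, nil
+//		})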
+ +func getOfflineSessions(q querier, userID string, connID string) (storage.OfflineSessions, error) { + return scanOfflineSessions(q.QueryRow(` + select + user_id, conn_id, refresh, connector_data + from offline_session + where user_id = $1 AND conn_id = $2; + `, userID, connID)) +} + +func scanOfflineSessions(s scanner) (o storage.OfflineSessions, err error) { + err = s.Scan( + &o.UserID, &o.ConnID, decoder(&o.Refresh), &o.ConnectorData, + ) + if err != nil { + if err == sql.ErrNoRows { + return o, storage.ErrNotFound + } + return o, fmt.Errorf("select offline session: %v", err) + } + return o, nil +} + +func (c *conn) CreateConnector(connector storage.Connector) error { + _, err := c.Exec(` + insert into connector ( + id, type, name, resource_version, config + ) + values ( + $1, $2, $3, $4, $5 + ); + `, + connector.ID, connector.Type, connector.Name, connector.ResourceVersion, connector.Config, + ) + if err != nil { + if c.alreadyExistsCheck(err) { + return storage.ErrAlreadyExists + } + return fmt.Errorf("insert connector: %v", err) + } + return nil +} + +func (c *conn) UpdateConnector(id string, updater func(s storage.Connector) (storage.Connector, error)) error { + return c.ExecTx(func(tx *trans) error { + connector, err := getConnector(tx, id) + if err != nil { + return err + } + + newConn, err := updater(connector) + if err != nil { + return err + } + _, err = tx.Exec(` + update connector + set + type = $1, + name = $2, + resource_version = $3, + config = $4 + where id = $5; + `, + newConn.Type, newConn.Name, newConn.ResourceVersion, newConn.Config, connector.ID, + ) + if err != nil { + return fmt.Errorf("update connector: %v", err) + } + return nil + }) +} + +func (c *conn) GetConnector(id string) (storage.Connector, error) { + return getConnector(c, id) +} + +func getConnector(q querier, id string) (storage.Connector, error) { + return scanConnector(q.QueryRow(` + select + id, type, name, resource_version, config + from connector + where id = $1; + `, id)) +} + +func scanConnector(s scanner) (c storage.Connector, err error) { + err = s.Scan( + &c.ID, &c.Type, &c.Name, &c.ResourceVersion, &c.Config, + ) + if err != nil { + if err == sql.ErrNoRows { + return c, storage.ErrNotFound + } + return c, fmt.Errorf("select connector: %v", err) + } + return c, nil +} + +func (c *conn) ListConnectors() ([]storage.Connector, error) { + rows, err := c.Query(` + select + id, type, name, resource_version, config + from connector; + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var connectors []storage.Connector + for rows.Next() { + conn, err := scanConnector(rows) + if err != nil { + return nil, err + } + connectors = append(connectors, conn) + } + if err := rows.Err(); err != nil { + return nil, err + } + return connectors, nil +} + +func (c *conn) DeleteAuthRequest(id string) error { return c.delete("auth_request", "id", id) } +func (c *conn) DeleteAuthCode(id string) error { return c.delete("auth_code", "id", id) } +func (c *conn) DeleteClient(id string) error { return c.delete("client", "id", id) } +func (c *conn) DeleteRefresh(id string) error { return c.delete("refresh_token", "id", id) } +func (c *conn) DeletePassword(email string) error { + return c.delete("password", "email", strings.ToLower(email)) +} +func (c *conn) DeleteConnector(id string) error { return c.delete("connector", "id", id) } + +func (c *conn) DeleteOfflineSessions(userID string, connID string) error { + result, err := c.Exec(`delete from offline_session where user_id = $1 AND conn_id = $2`, 
userID, connID)
+	if err != nil {
+		return fmt.Errorf("delete offline_session: user_id = %s, conn_id = %s: %v", userID, connID, err)
+	}
+
+	// For now mandate that the driver implements RowsAffected. If we ever need to support
+	// a driver that doesn't implement this, we can run this in a transaction with a get beforehand.
+	n, err := result.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("rows affected: %v", err)
+	}
+	if n < 1 {
+		return storage.ErrNotFound
+	}
+	return nil
+}
+
+// Do NOT call directly. Does not escape table.
+func (c *conn) delete(table, field, id string) error {
+	result, err := c.Exec(`delete from `+table+` where `+field+` = $1`, id)
+	if err != nil {
+		return fmt.Errorf("delete %s %q: %v", table, id, err)
+	}
+
+	// For now mandate that the driver implements RowsAffected. If we ever need to support
+	// a driver that doesn't implement this, we can run this in a transaction with a get beforehand.
+	n, err := result.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("rows affected: %v", err)
+	}
+	if n < 1 {
+		return storage.ErrNotFound
+	}
+	return nil
+}
+
+func (c *conn) CreateDeviceRequest(d storage.DeviceRequest) error {
+	_, err := c.Exec(`
+		insert into device_request (
+			user_code, device_code, client_id, client_secret, scopes, expiry
+		)
+		values (
+			$1, $2, $3, $4, $5, $6
+		);`,
+		d.UserCode, d.DeviceCode, d.ClientID, d.ClientSecret, encoder(d.Scopes), d.Expiry,
+	)
+	if err != nil {
+		if c.alreadyExistsCheck(err) {
+			return storage.ErrAlreadyExists
+		}
+		return fmt.Errorf("insert device request: %v", err)
+	}
+	return nil
+}
+
+func (c *conn) CreateDeviceToken(t storage.DeviceToken) error {
+	_, err := c.Exec(`
+		insert into device_token (
+			device_code, status, token, expiry, last_request, poll_interval, code_challenge, code_challenge_method
+		)
+		values (
+			$1, $2, $3, $4, $5, $6, $7, $8
+		);`,
+		t.DeviceCode, t.Status, t.Token, t.Expiry, t.LastRequestTime, t.PollIntervalSeconds, t.PKCE.CodeChallenge, t.PKCE.CodeChallengeMethod,
+	)
+	if err != nil {
+		if c.alreadyExistsCheck(err) {
+			return storage.ErrAlreadyExists
+		}
+		return fmt.Errorf("insert device token: %v", err)
+	}
+	return nil
+}
+
+func (c *conn) GetDeviceRequest(userCode string) (storage.DeviceRequest, error) {
+	return getDeviceRequest(c, userCode)
+}
+
+func getDeviceRequest(q querier, userCode string) (d storage.DeviceRequest, err error) {
+	err = q.QueryRow(`
+		select
+			device_code, client_id, client_secret, scopes, expiry
+		from device_request where user_code = $1;
+	`, userCode).Scan(
+		&d.DeviceCode, &d.ClientID, &d.ClientSecret, decoder(&d.Scopes), &d.Expiry,
+	)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return d, storage.ErrNotFound
+		}
+		return d, fmt.Errorf("select device request: %v", err)
+	}
+	d.UserCode = userCode
+	return d, nil
+}
+
+func (c *conn) GetDeviceToken(deviceCode string) (storage.DeviceToken, error) {
+	return getDeviceToken(c, deviceCode)
+}
+
+func getDeviceToken(q querier, deviceCode string) (a storage.DeviceToken, err error) {
+	err = q.QueryRow(`
+		select
+			status, token, expiry, last_request, poll_interval, code_challenge, code_challenge_method
+		from device_token where device_code = $1;
+	`, deviceCode).Scan(
+		&a.Status, &a.Token, &a.Expiry, &a.LastRequestTime, &a.PollIntervalSeconds, &a.PKCE.CodeChallenge, &a.PKCE.CodeChallengeMethod,
+	)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return a, storage.ErrNotFound
+		}
+		return a, fmt.Errorf("select device token: %v", err)
+	}
+	a.DeviceCode = deviceCode
+	return a, nil
+}
+
+func (c *conn)
UpdateDeviceToken(deviceCode string, updater func(old storage.DeviceToken) (storage.DeviceToken, error)) error { + return c.ExecTx(func(tx *trans) error { + r, err := getDeviceToken(tx, deviceCode) + if err != nil { + return err + } + if r, err = updater(r); err != nil { + return err + } + _, err = tx.Exec(` + update device_token + set + status = $1, + token = $2, + last_request = $3, + poll_interval = $4, + code_challenge = $5, + code_challenge_method = $6 + where + device_code = $7 + `, + r.Status, r.Token, r.LastRequestTime, r.PollIntervalSeconds, r.PKCE.CodeChallenge, r.PKCE.CodeChallengeMethod, r.DeviceCode, + ) + if err != nil { + return fmt.Errorf("update device token: %v", err) + } + return nil + }) +} diff --git a/vendor/github.com/dexidp/dex/storage/sql/migrate.go b/vendor/github.com/dexidp/dex/storage/sql/migrate.go new file mode 100644 index 00000000..83e9c20d --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/sql/migrate.go @@ -0,0 +1,301 @@ +package sql + +import ( + "database/sql" + "fmt" +) + +func (c *conn) migrate() (int, error) { + _, err := c.Exec(` + create table if not exists migrations ( + num integer not null, + at timestamptz not null + ); + `) + if err != nil { + return 0, fmt.Errorf("creating migration table: %v", err) + } + + i := 0 + done := false + + var flavorMigrations []migration + for _, m := range migrations { + if m.flavor == nil || m.flavor == c.flavor { + flavorMigrations = append(flavorMigrations, m) + } + } + + for { + err := c.ExecTx(func(tx *trans) error { + // Within a transaction, perform a single migration. + var ( + num sql.NullInt64 + n int + ) + if err := tx.QueryRow(`select max(num) from migrations;`).Scan(&num); err != nil { + return fmt.Errorf("select max migration: %v", err) + } + if num.Valid { + n = int(num.Int64) + } + if n >= len(flavorMigrations) { + done = true + return nil + } + + migrationNum := n + 1 + m := flavorMigrations[n] + for i := range m.stmts { + if _, err := tx.Exec(m.stmts[i]); err != nil { + return fmt.Errorf("migration %d statement %d failed: %v", migrationNum, i+1, err) + } + } + + q := `insert into migrations (num, at) values ($1, now());` + if _, err := tx.Exec(q, migrationNum); err != nil { + return fmt.Errorf("update migration table: %v", err) + } + return nil + }) + if err != nil { + return i, err + } + if done { + break + } + i++ + } + + return i, nil +} + +type migration struct { + stmts []string + + // If flavor is nil the migration will take place for all database backend flavors. + // If specified, only for that corresponding flavor, in that case stmts can be written + // in the specific SQL dialect. + flavor *flavor +} + +// All SQL flavors share migration strategies. 
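+//
+// Editor's note: migrations are identified purely by position. migrate()
+// replays entries in order and records progress as max(num) in the migrations
+// table, so entries must only ever be appended, never reordered or edited;
+// dialect-specific entries (non-nil flavor) are filtered out for other
+// drivers before positions are counted.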
+var migrations = []migration{ + { + stmts: []string{ + ` + create table client ( + id text not null primary key, + secret text not null, + redirect_uris bytea not null, -- JSON array of strings + trusted_peers bytea not null, -- JSON array of strings + public boolean not null, + name text not null, + logo_url text not null + );`, + ` + create table auth_request ( + id text not null primary key, + client_id text not null, + response_types bytea not null, -- JSON array of strings + scopes bytea not null, -- JSON array of strings + redirect_uri text not null, + nonce text not null, + state text not null, + force_approval_prompt boolean not null, + + logged_in boolean not null, + + claims_user_id text not null, + claims_username text not null, + claims_email text not null, + claims_email_verified boolean not null, + claims_groups bytea not null, -- JSON array of strings + + connector_id text not null, + connector_data bytea, + + expiry timestamptz not null + );`, + ` + create table auth_code ( + id text not null primary key, + client_id text not null, + scopes bytea not null, -- JSON array of strings + nonce text not null, + redirect_uri text not null, + + claims_user_id text not null, + claims_username text not null, + claims_email text not null, + claims_email_verified boolean not null, + claims_groups bytea not null, -- JSON array of strings + + connector_id text not null, + connector_data bytea, + + expiry timestamptz not null + );`, + ` + create table refresh_token ( + id text not null primary key, + client_id text not null, + scopes bytea not null, -- JSON array of strings + nonce text not null, + + claims_user_id text not null, + claims_username text not null, + claims_email text not null, + claims_email_verified boolean not null, + claims_groups bytea not null, -- JSON array of strings + + connector_id text not null, + connector_data bytea + );`, + ` + create table password ( + email text not null primary key, + hash bytea not null, + username text not null, + user_id text not null + );`, + ` + -- keys is a weird table because we only ever expect there to be a single row + create table keys ( + id text not null primary key, + verification_keys bytea not null, -- JSON array + signing_key bytea not null, -- JSON object + signing_key_pub bytea not null, -- JSON object + next_rotation timestamptz not null + );`, + }, + }, + { + stmts: []string{ + ` + alter table refresh_token + add column token text not null default '';`, + ` + alter table refresh_token + add column created_at timestamptz not null default '0001-01-01 00:00:00 UTC';`, + ` + alter table refresh_token + add column last_used timestamptz not null default '0001-01-01 00:00:00 UTC';`, + }, + }, + { + stmts: []string{ + ` + create table offline_session ( + user_id text not null, + conn_id text not null, + refresh bytea not null, + PRIMARY KEY (user_id, conn_id) + );`, + }, + }, + { + stmts: []string{ + ` + create table connector ( + id text not null primary key, + type text not null, + name text not null, + resource_version text not null, + config bytea + );`, + }, + }, + { + stmts: []string{ + ` + alter table auth_code + add column claims_preferred_username text not null default '';`, + ` + alter table auth_request + add column claims_preferred_username text not null default '';`, + ` + alter table refresh_token + add column claims_preferred_username text not null default '';`, + }, + }, + { + stmts: []string{ + ` + alter table offline_session + add column connector_data bytea; + `, + }, + }, + { + stmts: []string{ + ` + alter 
table auth_request + modify column state varchar(4096); + `, + }, + flavor: &flavorMySQL, + }, + { + stmts: []string{ + ` + create table device_request ( + user_code text not null primary key, + device_code text not null, + client_id text not null, + client_secret text , + scopes bytea not null, -- JSON array of strings + expiry timestamptz not null + );`, + ` + create table device_token ( + device_code text not null primary key, + status text not null, + token bytea, + expiry timestamptz not null, + last_request timestamptz not null, + poll_interval integer not null + );`, + }, + }, + { + stmts: []string{ + ` + alter table auth_request + add column code_challenge text not null default '';`, + ` + alter table auth_request + add column code_challenge_method text not null default '';`, + ` + alter table auth_code + add column code_challenge text not null default '';`, + ` + alter table auth_code + add column code_challenge_method text not null default '';`, + }, + }, + { + stmts: []string{ + ` + alter table refresh_token + add column obsolete_token text default '';`, + }, + }, + { + stmts: []string{ + ` + alter table device_token + add column code_challenge text not null default '';`, + ` + alter table device_token + add column code_challenge_method text not null default '';`, + }, + }, + { + stmts: []string{ + ` + alter table auth_request + add column hmac_key bytea;`, + }, + }, +} diff --git a/vendor/github.com/dexidp/dex/storage/sql/sql.go b/vendor/github.com/dexidp/dex/storage/sql/sql.go new file mode 100644 index 00000000..0a292169 --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/sql/sql.go @@ -0,0 +1,198 @@ +// Package sql provides SQL implementations of the storage interface. +package sql + +import ( + "database/sql" + "regexp" + "time" + + // import third party drivers + _ "github.com/lib/pq" + _ "github.com/mattn/go-sqlite3" + + "github.com/dexidp/dex/pkg/log" +) + +// flavor represents a specific SQL implementation, and is used to translate query strings +// between different drivers. Flavors shouldn't aim to translate all possible SQL statements, +// only the specific queries used by the SQL storages. +type flavor struct { + queryReplacers []replacer + + // Optional function to create and finish a transaction. + executeTx func(db *sql.DB, fn func(*sql.Tx) error) error + + // Does the flavor support timezones? + supportsTimezones bool +} + +// A regexp with a replacement string. +type replacer struct { + re *regexp.Regexp + with string +} + +// Match a postgres query binds. E.g. "$1", "$12", etc. +var bindRegexp = regexp.MustCompile(`\$\d+`) + +func matchLiteral(s string) *regexp.Regexp { + return regexp.MustCompile(`\b` + regexp.QuoteMeta(s) + `\b`) +} + +var ( + // The "github.com/lib/pq" driver is the default flavor. All others are + // translations of this. + flavorPostgres = flavor{ + // The default behavior for Postgres transactions is consistent reads, not consistent writes. + // For each transaction opened, ensure it has the correct isolation level. + // + // See: https://www.postgresql.org/docs/9.3/static/sql-set-transaction.html + // + // NOTE(ericchiang): For some reason using `SET SESSION CHARACTERISTICS AS TRANSACTION` at a + // session level didn't work for some edge cases. Might be something worth exploring. 
+		executeTx: func(db *sql.DB, fn func(sqlTx *sql.Tx) error) error {
+			tx, err := db.Begin()
+			if err != nil {
+				return err
+			}
+			defer tx.Rollback()
+
+			if _, err := tx.Exec(`SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;`); err != nil {
+				return err
+			}
+			if err := fn(tx); err != nil {
+				return err
+			}
+			return tx.Commit()
+		},
+
+		supportsTimezones: true,
+	}
+
+	flavorSQLite3 = flavor{
+		queryReplacers: []replacer{
+			{bindRegexp, "?"},
+			// Translate booleans to integers.
+			{matchLiteral("true"), "1"},
+			{matchLiteral("false"), "0"},
+			{matchLiteral("boolean"), "integer"},
+			// Translate other types.
+			{matchLiteral("bytea"), "blob"},
+			{matchLiteral("timestamptz"), "timestamp"},
+			// SQLite doesn't have a "now()" method, replace with "date('now')"
+			{regexp.MustCompile(`\bnow\(\)`), "date('now')"},
+		},
+	}
+
+	flavorMySQL = flavor{
+		queryReplacers: []replacer{
+			{bindRegexp, "?"},
+			// Translate types.
+			{matchLiteral("bytea"), "blob"},
+			{matchLiteral("timestamptz"), "datetime(3)"},
+			// MySQL doesn't support indices on text fields w/o
+			// specifying key length. Use varchar instead (767 byte
+			// is the max key length for InnoDB with 4k pages).
+			// For compound indexes (with two keys) even less.
+			{matchLiteral("text"), "varchar(384)"},
+			// Quote keywords and reserved words used as identifiers.
+			{regexp.MustCompile(`\b(keys)\b`), "`$1`"},
+			// Change default timestamp to fit datetime.
+			{regexp.MustCompile(`0001-01-01 00:00:00 UTC`), "1000-01-01 00:00:00"},
+		},
+	}
+)
+
+func (f flavor) translate(query string) string {
+	// TODO(ericchiang): Heavy caching.
+	for _, r := range f.queryReplacers {
+		query = r.re.ReplaceAllString(query, r.with)
+	}
+	return query
+}
+
+// translateArgs translates query parameters that may be unique to
+// a specific SQL flavor. For example, standardizing "time.Time"
+// types to UTC for clients that don't provide timezone support.
+func (c *conn) translateArgs(args []interface{}) []interface{} {
+	if c.flavor.supportsTimezones {
+		return args
+	}
+
+	for i, arg := range args {
+		if t, ok := arg.(time.Time); ok {
+			args[i] = t.UTC()
+		}
+	}
+	return args
+}
+
+// conn is the main database connection.
+type conn struct {
+	db                 *sql.DB
+	flavor             *flavor
+	logger             log.Logger
+	alreadyExistsCheck func(err error) bool
+}
+
+func (c *conn) Close() error {
+	return c.db.Close()
+}
+
+// conn implements the same method signatures as database/sql.DB.
+
+func (c *conn) Exec(query string, args ...interface{}) (sql.Result, error) {
+	query = c.flavor.translate(query)
+	return c.db.Exec(query, c.translateArgs(args)...)
+}
+
+func (c *conn) Query(query string, args ...interface{}) (*sql.Rows, error) {
+	query = c.flavor.translate(query)
+	return c.db.Query(query, c.translateArgs(args)...)
+}
+
+func (c *conn) QueryRow(query string, args ...interface{}) *sql.Row {
+	query = c.flavor.translate(query)
+	return c.db.QueryRow(query, c.translateArgs(args)...)
+}
+
+// ExecTx runs a method which operates on a transaction.
+func (c *conn) ExecTx(fn func(tx *trans) error) error {
+	if c.flavor.executeTx != nil {
+		return c.flavor.executeTx(c.db, func(sqlTx *sql.Tx) error {
+			return fn(&trans{sqlTx, c})
+		})
+	}
+
+	sqlTx, err := c.db.Begin()
+	if err != nil {
+		return err
+	}
+	if err := fn(&trans{sqlTx, c}); err != nil {
+		sqlTx.Rollback()
+		return err
+	}
+	return sqlTx.Commit()
+}
+
+type trans struct {
+	tx *sql.Tx
+	c  *conn
+}
+
+// trans implements the same method signatures as database/sql.Tx.
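+// Editor's note: like conn, every statement goes through flavor.translate and
+// translateArgs first, so the queries in this package can be written once in
+// the Postgres dialect ($1 binds, bytea, timestamptz) and rewritten per
+// driver, e.g. "$1" becomes "?" on MySQL and SQLite.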
+
+func (t *trans) Exec(query string, args ...interface{}) (sql.Result, error) {
+	query = t.c.flavor.translate(query)
+	return t.tx.Exec(query, t.c.translateArgs(args)...)
+}
+
+func (t *trans) Query(query string, args ...interface{}) (*sql.Rows, error) {
+	query = t.c.flavor.translate(query)
+	return t.tx.Query(query, t.c.translateArgs(args)...)
+}
+
+func (t *trans) QueryRow(query string, args ...interface{}) *sql.Row {
+	query = t.c.flavor.translate(query)
+	return t.tx.QueryRow(query, t.c.translateArgs(args)...)
+}
diff --git a/vendor/github.com/dexidp/dex/storage/sql/sqlite.go b/vendor/github.com/dexidp/dex/storage/sql/sqlite.go
new file mode 100644
index 00000000..43df671a
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/sql/sqlite.go
@@ -0,0 +1,53 @@
+//go:build cgo
+// +build cgo
+
+package sql
+
+import (
+	"database/sql"
+	"fmt"
+
+	sqlite3 "github.com/mattn/go-sqlite3"
+
+	"github.com/dexidp/dex/pkg/log"
+	"github.com/dexidp/dex/storage"
+)
+
+// SQLite3 options for creating an SQL db.
+type SQLite3 struct {
+	// File to use as the SQLite database.
+	File string `json:"file"`
+}
+
+// Open creates a new storage implementation backed by SQLite3.
+func (s *SQLite3) Open(logger log.Logger) (storage.Storage, error) {
+	conn, err := s.open(logger)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func (s *SQLite3) open(logger log.Logger) (*conn, error) {
+	db, err := sql.Open("sqlite3", s.File)
+	if err != nil {
+		return nil, err
+	}
+
+	// always allow only one connection to sqlite3, any other thread/go-routine
+	// attempting concurrent access will have to wait
+	db.SetMaxOpenConns(1)
+	errCheck := func(err error) bool {
+		sqlErr, ok := err.(sqlite3.Error)
+		if !ok {
+			return false
+		}
+		return sqlErr.ExtendedCode == sqlite3.ErrConstraintPrimaryKey
+	}
+
+	c := &conn{db, &flavorSQLite3, logger, errCheck}
+	if _, err := c.migrate(); err != nil {
+		return nil, fmt.Errorf("failed to perform migrations: %v", err)
+	}
+	return c, nil
+}
diff --git a/vendor/github.com/dexidp/dex/storage/static.go b/vendor/github.com/dexidp/dex/storage/static.go
new file mode 100644
index 00000000..806b61f9
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/storage/static.go
@@ -0,0 +1,232 @@
+package storage
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/dexidp/dex/pkg/log"
+)
+
+// Tests for this code are in the "memory" package, since this package doesn't
+// define a concrete storage implementation.
+
+// staticClientsStorage is a storage that allows only read-only actions on clients.
+// All read actions return from the list of clients stored in memory, not the
+// underlying storage.
+type staticClientsStorage struct {
+	Storage
+
+	// A read-only set of clients.
+	clients     []Client
+	clientsByID map[string]Client
+}
+
+// WithStaticClients adds a read-only set of clients to the underlying storages.
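+// Write actions against a static client fail; reads are served from memory.
+//
+// Editor's note: an illustrative sketch (the client values are made up):
+//
+//	s = storage.WithStaticClients(s, []storage.Client{{
+//		ID:           "example-app",
+//		Secret:       "ZXhhbXBsZS1hcHAtc2VjcmV0",
+//		RedirectURIs: []string{"http://127.0.0.1:5555/callback"},
+//		Name:         "Example App",
+//	}})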
+func WithStaticClients(s Storage, staticClients []Client) Storage { + clientsByID := make(map[string]Client, len(staticClients)) + for _, client := range staticClients { + clientsByID[client.ID] = client + } + + return staticClientsStorage{s, staticClients, clientsByID} +} + +func (s staticClientsStorage) GetClient(id string) (Client, error) { + if client, ok := s.clientsByID[id]; ok { + return client, nil + } + return s.Storage.GetClient(id) +} + +func (s staticClientsStorage) isStatic(id string) bool { + _, ok := s.clientsByID[id] + return ok +} + +func (s staticClientsStorage) ListClients() ([]Client, error) { + clients, err := s.Storage.ListClients() + if err != nil { + return nil, err + } + n := 0 + for _, client := range clients { + // If a client in the backing storage has the same ID as a static client + // prefer the static client. + if !s.isStatic(client.ID) { + clients[n] = client + n++ + } + } + return append(clients[:n], s.clients...), nil +} + +func (s staticClientsStorage) CreateClient(c Client) error { + if s.isStatic(c.ID) { + return errors.New("static clients: read-only cannot create client") + } + return s.Storage.CreateClient(c) +} + +func (s staticClientsStorage) DeleteClient(id string) error { + if s.isStatic(id) { + return errors.New("static clients: read-only cannot delete client") + } + return s.Storage.DeleteClient(id) +} + +func (s staticClientsStorage) UpdateClient(id string, updater func(old Client) (Client, error)) error { + if s.isStatic(id) { + return errors.New("static clients: read-only cannot update client") + } + return s.Storage.UpdateClient(id, updater) +} + +type staticPasswordsStorage struct { + Storage + + // A read-only set of passwords. + passwords []Password + // A map of passwords that is indexed by lower-case email ids + passwordsByEmail map[string]Password + + logger log.Logger +} + +// WithStaticPasswords returns a storage with a read-only set of passwords. +func WithStaticPasswords(s Storage, staticPasswords []Password, logger log.Logger) Storage { + passwordsByEmail := make(map[string]Password, len(staticPasswords)) + for _, p := range staticPasswords { + // Enable case insensitive email comparison. + lowerEmail := strings.ToLower(p.Email) + if _, ok := passwordsByEmail[lowerEmail]; ok { + logger.Errorf("Attempting to create StaticPasswords with the same email id: %s", p.Email) + } + passwordsByEmail[lowerEmail] = p + } + + return staticPasswordsStorage{s, staticPasswords, passwordsByEmail, logger} +} + +func (s staticPasswordsStorage) isStatic(email string) bool { + _, ok := s.passwordsByEmail[strings.ToLower(email)] + return ok +} + +func (s staticPasswordsStorage) GetPassword(email string) (Password, error) { + // TODO(ericchiang): BLAH. We really need to figure out how to handle + // lower cased emails better. + email = strings.ToLower(email) + if password, ok := s.passwordsByEmail[email]; ok { + return password, nil + } + return s.Storage.GetPassword(email) +} + +func (s staticPasswordsStorage) ListPasswords() ([]Password, error) { + passwords, err := s.Storage.ListPasswords() + if err != nil { + return nil, err + } + + n := 0 + for _, password := range passwords { + // If an entry has the same email as those provided in the static + // values, prefer the static value. 
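+		// Editor's note: this is the standard in-place filter idiom. Entries
+		// shadowed by a static password are dropped by compacting the slice
+		// with index n; the statics themselves are appended after the loop,
+		// so a static entry always wins over the backing storage.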
+ if !s.isStatic(password.Email) { + passwords[n] = password + n++ + } + } + return append(passwords[:n], s.passwords...), nil +} + +func (s staticPasswordsStorage) CreatePassword(p Password) error { + if s.isStatic(p.Email) { + return errors.New("static passwords: read-only cannot create password") + } + return s.Storage.CreatePassword(p) +} + +func (s staticPasswordsStorage) DeletePassword(email string) error { + if s.isStatic(email) { + return errors.New("static passwords: read-only cannot delete password") + } + return s.Storage.DeletePassword(email) +} + +func (s staticPasswordsStorage) UpdatePassword(email string, updater func(old Password) (Password, error)) error { + if s.isStatic(email) { + return errors.New("static passwords: read-only cannot update password") + } + return s.Storage.UpdatePassword(email, updater) +} + +// staticConnectorsStorage represents a storage with read-only set of connectors. +type staticConnectorsStorage struct { + Storage + + // A read-only set of connectors. + connectors []Connector + connectorsByID map[string]Connector +} + +// WithStaticConnectors returns a storage with a read-only set of Connectors. Write actions, +// such as updating existing Connectors, will fail. +func WithStaticConnectors(s Storage, staticConnectors []Connector) Storage { + connectorsByID := make(map[string]Connector, len(staticConnectors)) + for _, c := range staticConnectors { + connectorsByID[c.ID] = c + } + return staticConnectorsStorage{s, staticConnectors, connectorsByID} +} + +func (s staticConnectorsStorage) isStatic(id string) bool { + _, ok := s.connectorsByID[id] + return ok +} + +func (s staticConnectorsStorage) GetConnector(id string) (Connector, error) { + if connector, ok := s.connectorsByID[id]; ok { + return connector, nil + } + return s.Storage.GetConnector(id) +} + +func (s staticConnectorsStorage) ListConnectors() ([]Connector, error) { + connectors, err := s.Storage.ListConnectors() + if err != nil { + return nil, err + } + + n := 0 + for _, connector := range connectors { + // If an entry has the same id as those provided in the static + // values, prefer the static value. + if !s.isStatic(connector.ID) { + connectors[n] = connector + n++ + } + } + return append(connectors[:n], s.connectors...), nil +} + +func (s staticConnectorsStorage) CreateConnector(c Connector) error { + if s.isStatic(c.ID) { + return errors.New("static connectors: read-only cannot create connector") + } + return s.Storage.CreateConnector(c) +} + +func (s staticConnectorsStorage) DeleteConnector(id string) error { + if s.isStatic(id) { + return errors.New("static connectors: read-only cannot delete connector") + } + return s.Storage.DeleteConnector(id) +} + +func (s staticConnectorsStorage) UpdateConnector(id string, updater func(old Connector) (Connector, error)) error { + if s.isStatic(id) { + return errors.New("static connectors: read-only cannot update connector") + } + return s.Storage.UpdateConnector(id, updater) +} diff --git a/vendor/github.com/dexidp/dex/storage/storage.go b/vendor/github.com/dexidp/dex/storage/storage.go new file mode 100644 index 00000000..743d2ecb --- /dev/null +++ b/vendor/github.com/dexidp/dex/storage/storage.go @@ -0,0 +1,444 @@ +package storage + +import ( + "crypto" + "crypto/rand" + "encoding/base32" + "errors" + "io" + "math/big" + "strings" + "time" + + jose "gopkg.in/square/go-jose.v2" +) + +var ( + // ErrNotFound is the error returned by storages if a resource cannot be found. 
+ ErrNotFound = errors.New("not found") + + // ErrAlreadyExists is the error returned by storages if a resource ID is taken during a create. + ErrAlreadyExists = errors.New("ID already exists") +) + +// Kubernetes only allows lower case letters for names. +// +// TODO(ericchiang): refactor ID creation onto the storage. +var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567") + +// Valid characters for user codes +const validUserCharacters = "BCDFGHJKLMNPQRSTVWXZ" + +// NewDeviceCode returns a 32 char alphanumeric cryptographically secure string +func NewDeviceCode() string { + return newSecureID(32) +} + +// NewID returns a random string which can be used as an ID for objects. +func NewID() string { + return newSecureID(16) +} + +func newSecureID(len int) string { + buff := make([]byte, len) // random ID. + if _, err := io.ReadFull(rand.Reader, buff); err != nil { + panic(err) + } + // Avoid the identifier to begin with number and trim padding + return string(buff[0]%26+'a') + strings.TrimRight(encoding.EncodeToString(buff[1:]), "=") +} + +// NewHMACKey returns a random key which can be used in the computation of an HMAC +func NewHMACKey(h crypto.Hash) []byte { + return []byte(newSecureID(h.Size())) +} + +// GCResult returns the number of objects deleted by garbage collection. +type GCResult struct { + AuthRequests int64 + AuthCodes int64 + DeviceRequests int64 + DeviceTokens int64 +} + +// IsEmpty returns whether the garbage collection result is empty or not. +func (g *GCResult) IsEmpty() bool { + return g.AuthRequests == 0 && + g.AuthCodes == 0 && + g.DeviceRequests == 0 && + g.DeviceTokens == 0 +} + +// Storage is the storage interface used by the server. Implementations are +// required to be able to perform atomic compare-and-swap updates and either +// support timezones or standardize on UTC. +type Storage interface { + Close() error + + // TODO(ericchiang): Let the storages set the IDs of these objects. + CreateAuthRequest(a AuthRequest) error + CreateClient(c Client) error + CreateAuthCode(c AuthCode) error + CreateRefresh(r RefreshToken) error + CreatePassword(p Password) error + CreateOfflineSessions(s OfflineSessions) error + CreateConnector(c Connector) error + CreateDeviceRequest(d DeviceRequest) error + CreateDeviceToken(d DeviceToken) error + + // TODO(ericchiang): return (T, bool, error) so we can indicate not found + // requests that way instead of using ErrNotFound. + GetAuthRequest(id string) (AuthRequest, error) + GetAuthCode(id string) (AuthCode, error) + GetClient(id string) (Client, error) + GetKeys() (Keys, error) + GetRefresh(id string) (RefreshToken, error) + GetPassword(email string) (Password, error) + GetOfflineSessions(userID string, connID string) (OfflineSessions, error) + GetConnector(id string) (Connector, error) + GetDeviceRequest(userCode string) (DeviceRequest, error) + GetDeviceToken(deviceCode string) (DeviceToken, error) + + ListClients() ([]Client, error) + ListRefreshTokens() ([]RefreshToken, error) + ListPasswords() ([]Password, error) + ListConnectors() ([]Connector, error) + + // Delete methods MUST be atomic. + DeleteAuthRequest(id string) error + DeleteAuthCode(code string) error + DeleteClient(id string) error + DeleteRefresh(id string) error + DeletePassword(email string) error + DeleteOfflineSessions(userID string, connID string) error + DeleteConnector(id string) error + + // Update methods take a function for updating an object then performs that update within + // a transaction. 
"updater" functions may be called multiple times by a single update call. + // + // Because new fields may be added to resources, updaters should only modify existing + // fields on the old object rather then creating new structs. For example: + // + // updater := func(old storage.Client) (storage.Client, error) { + // old.Secret = newSecret + // return old, nil + // } + // if err := s.UpdateClient(clientID, updater); err != nil { + // // update failed, handle error + // } + // + UpdateClient(id string, updater func(old Client) (Client, error)) error + UpdateKeys(updater func(old Keys) (Keys, error)) error + UpdateAuthRequest(id string, updater func(a AuthRequest) (AuthRequest, error)) error + UpdateRefreshToken(id string, updater func(r RefreshToken) (RefreshToken, error)) error + UpdatePassword(email string, updater func(p Password) (Password, error)) error + UpdateOfflineSessions(userID string, connID string, updater func(s OfflineSessions) (OfflineSessions, error)) error + UpdateConnector(id string, updater func(c Connector) (Connector, error)) error + UpdateDeviceToken(deviceCode string, updater func(t DeviceToken) (DeviceToken, error)) error + + // GarbageCollect deletes all expired AuthCodes, + // AuthRequests, DeviceRequests, and DeviceTokens. + GarbageCollect(now time.Time) (GCResult, error) +} + +// Client represents an OAuth2 client. +// +// For further reading see: +// - Trusted peers: https://developers.google.com/identity/protocols/CrossClientAuth +// - Public clients: https://developers.google.com/api-client-library/python/auth/installed-app +type Client struct { + // Client ID and secret used to identify the client. + ID string `json:"id" yaml:"id"` + IDEnv string `json:"idEnv" yaml:"idEnv"` + Secret string `json:"secret" yaml:"secret"` + SecretEnv string `json:"secretEnv" yaml:"secretEnv"` + + // A registered set of redirect URIs. When redirecting from dex to the client, the URI + // requested to redirect to MUST match one of these values, unless the client is "public". + RedirectURIs []string `json:"redirectURIs" yaml:"redirectURIs"` + + // TrustedPeers are a list of peers which can issue tokens on this client's behalf using + // the dynamic "oauth2:server:client_id:(client_id)" scope. If a peer makes such a request, + // this client's ID will appear as the ID Token's audience. + // + // Clients inherently trust themselves. + TrustedPeers []string `json:"trustedPeers" yaml:"trustedPeers"` + + // Public clients must use either use a redirectURL 127.0.0.1:X or "urn:ietf:wg:oauth:2.0:oob" + Public bool `json:"public" yaml:"public"` + + // Name and LogoURL used when displaying this client to the end user. + Name string `json:"name" yaml:"name"` + LogoURL string `json:"logoURL" yaml:"logoURL"` +} + +// Claims represents the ID Token claims supported by the server. +type Claims struct { + UserID string + Username string + PreferredUsername string + Email string + EmailVerified bool + + Groups []string +} + +// PKCE is a container for the data needed to perform Proof Key for Code Exchange (RFC 7636) auth flow +type PKCE struct { + CodeChallenge string + CodeChallengeMethod string +} + +// AuthRequest represents a OAuth2 client authorization request. It holds the state +// of a single auth flow up to the point that the user authorizes the client. +type AuthRequest struct { + // ID used to identify the authorization request. + ID string + + // ID of the client requesting authorization from a user. + ClientID string + + // Values parsed from the initial request. 
+	// requesting as well as values describing the form of the response.
+	ResponseTypes []string
+	Scopes        []string
+	RedirectURI   string
+	Nonce         string
+	State         string
+
+	// The client has indicated that the end user must be shown an approval prompt
+	// on all requests. The server cannot cache their initial action for subsequent
+	// attempts.
+	ForceApprovalPrompt bool
+
+	Expiry time.Time
+
+	// Has the user proved their identity through a backing identity provider?
+	//
+	// If false, the following fields are invalid.
+	LoggedIn bool
+
+	// The identity of the end user. Generally nil until the user authenticates
+	// with a backend.
+	Claims Claims
+
+	// The connector used to log the user in and any data the connector wishes to persist.
+	// Set when the user authenticates.
+	ConnectorID   string
+	ConnectorData []byte
+
+	// PKCE CodeChallenge and CodeChallengeMethod.
+	PKCE PKCE
+
+	// HMACKey is used when generating an AuthRequest-specific HMAC.
+	HMACKey []byte
+}
+
+// AuthCode represents a code which can be exchanged for an OAuth2 token response.
+//
+// This value is created once an end user has authorized a client and the server has
+// redirected the end user back to the client, but the client hasn't exchanged the
+// code for an access_token and id_token.
+type AuthCode struct {
+	// Actual string returned as the "code" value.
+	ID string
+
+	// The client this code value is valid for. When exchanging the code for a
+	// token response, the client must use its client_secret to authenticate.
+	ClientID string
+
+	// As part of the OAuth2 spec, when a client makes a token request it MUST
+	// present the same redirect_uri as in the initial redirect. This value is saved
+	// to make this check.
+	//
+	// https://tools.ietf.org/html/rfc6749#section-4.1.3
+	RedirectURI string
+
+	// If provided by the client in the initial request, the provider MUST create
+	// an ID Token with this nonce in the JWT payload.
+	Nonce string
+
+	// Scopes authorized by the end user for the client.
+	Scopes []string
+
+	// Authentication data provided by an upstream source.
+	ConnectorID   string
+	ConnectorData []byte
+	Claims        Claims
+
+	Expiry time.Time
+
+	// PKCE CodeChallenge and CodeChallengeMethod.
+	PKCE PKCE
+}
+
+// RefreshToken is an OAuth2 refresh token which allows a client to request new
+// tokens on the end user's behalf.
+type RefreshToken struct {
+	ID string
+
+	// A single token that's rotated every time the refresh token is refreshed.
+	//
+	// May be empty.
+	Token         string
+	ObsoleteToken string
+
+	CreatedAt time.Time
+	LastUsed  time.Time
+
+	// Client this refresh token is valid for.
+	ClientID string
+
+	// Authentication data provided by an upstream source.
+	ConnectorID   string
+	ConnectorData []byte
+	Claims        Claims
+
+	// Scopes present in the initial request. Refresh requests may specify a set
+	// of scopes different from the initial request when refreshing a token;
+	// however, those scopes must be encompassed by this set.
+	Scopes []string
+
+	// Nonce value supplied during the initial redirect. This is required to be part
+	// of the claims of any future id_token generated by the client.
+	Nonce string
+}
+
+// RefreshTokenRef is a reference object that contains metadata about refresh tokens.
+type RefreshTokenRef struct {
+	ID string
+
+	// Client the refresh token is valid for.
+	ClientID string
+
+	CreatedAt time.Time
+	LastUsed  time.Time
+}
+
+// OfflineSessions objects are sessions pertaining to users with refresh tokens.
+type OfflineSessions struct {
+	// UserID of an end user who has logged into the server.
+	UserID string
+
+	// The ID of the connector used to log the user in.
+	ConnID string
+
+	// Refresh is a hash table of refresh token reference objects
+	// indexed by the ClientID of the refresh token.
+	Refresh map[string]*RefreshTokenRef
+
+	// Authentication data provided by an upstream source.
+	ConnectorData []byte
+}
+
+// Password is an email to password mapping managed by the storage.
+type Password struct {
+	// Email and identifying name of the password. Emails are assumed to be valid and
+	// determining that an end-user controls the address is left to an outside application.
+	//
+	// Emails are case insensitive and should be standardized by the storage.
+	//
+	// Storages that don't support an extended character set for IDs, such as '.' and '@'
+	// (cough cough, kubernetes), must map this value appropriately.
+	Email string `json:"email"`
+
+	// Bcrypt-encoded hash of the password. This package enforces a min cost value of 10.
+	Hash []byte `json:"hash"`
+
+	// Bcrypt-encoded hash of the password set in the environment variable of this name.
+	HashFromEnv string `json:"hashFromEnv"`
+
+	// Optional username to display. NOT used during login.
+	Username string `json:"username"`
+
+	// Randomly generated user ID. This is NOT the primary ID of the Password object.
+	UserID string `json:"userID"`
+}
+
+// Connector is an object that contains the metadata about connectors used to log in to Dex.
+type Connector struct {
+	// ID that will uniquely identify the connector object.
+	ID string `json:"id"`
+	// The Type of the connector, e.g. 'oidc' or 'ldap'.
+	Type string `json:"type"`
+	// The Name of the connector that is used when displaying it to the end user.
+	Name string `json:"name"`
+	// ResourceVersion is the static versioning used to keep track of dynamic configuration
+	// changes to the connector object made by the API calls.
+	ResourceVersion string `json:"resourceVersion"`
+	// Config holds all the configuration information specific to the connector type. Since there
+	// is no generic struct we can use for this purpose, it is stored as a byte stream.
+	//
+	// NOTE: This is a bug. The JSON tag should be `config`.
+	// However, fixing this requires migrating the Kubernetes objects of all previously created connectors,
+	// or making Dex read both tags and act accordingly.
+	Config []byte `json:"email"`
+}
+
+// VerificationKey is a rotated signing key which can still be used to verify
+// signatures.
+type VerificationKey struct {
+	PublicKey *jose.JSONWebKey `json:"publicKey"`
+	Expiry    time.Time        `json:"expiry"`
+}
+
+// Keys hold encryption and signing keys.
+type Keys struct {
+	// Key for creating and verifying signatures. These may be nil.
+	SigningKey    *jose.JSONWebKey
+	SigningKeyPub *jose.JSONWebKey
+
+	// Old signing keys which have been rotated but can still be used to validate
+	// existing signatures.
+	VerificationKeys []VerificationKey
+
+	// The next time the signing key will rotate.
+	//
+	// For caching purposes, implementations MUST NOT update keys before this time.
+	NextRotation time.Time
+}
+
+// NewUserCode returns a randomized 8-character user code for the device flow.
+// No vowels are included to prevent accidental generation of words.
+func NewUserCode() string {
+	code := randomString(8)
+	return code[:4] + "-" + code[4:]
+}
+
+func randomString(n int) string {
+	v := big.NewInt(int64(len(validUserCharacters)))
+	bytes := make([]byte, n)
+	for i := 0; i < n; i++ {
+		c, _ := rand.Int(rand.Reader, v)
+		bytes[i] = validUserCharacters[c.Int64()]
+	}
+	return string(bytes)
+}
+
+// DeviceRequest represents an OIDC device authorization request. It holds the state of a device request until the user
+// authenticates using their user code or the expiry time passes.
+type DeviceRequest struct {
+	// The code the user will enter in a browser.
+	UserCode string
+	// The unique device code for device authentication.
+	DeviceCode string
+	// The client ID the code is for.
+	ClientID string
+	// The client secret.
+	ClientSecret string
+	// The scopes the device requests.
+	Scopes []string
+	// The expiry time.
+	Expiry time.Time
+}
+
+// DeviceToken is a structure which represents the actual token of an authorized device and its rotation parameters.
+type DeviceToken struct {
+	DeviceCode          string
+	Status              string
+	Token               string
+	Expiry              time.Time
+	LastRequestTime     time.Time
+	PollIntervalSeconds int
+	PKCE                PKCE
+}
diff --git a/vendor/github.com/dexidp/dex/web/BUILD b/vendor/github.com/dexidp/dex/web/BUILD
new file mode 100644
index 00000000..621d3141
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/BUILD
@@ -0,0 +1,41 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "web",
+    srcs = ["web.go"],
+    embedsrcs = [
+        "robots.txt",
+        "static/img/atlassian-crowd-icon.svg",
+        "static/img/bitbucket-icon.svg",
+        "static/img/email-icon.svg",
+        "static/img/gitea-icon.svg",
+        "static/img/github-icon.svg",
+        "static/img/gitlab-icon.svg",
+        "static/img/google-icon.svg",
+        "static/img/keystone-icon.svg",
+        "static/img/ldap-icon.svg",
+        "static/img/linkedin-icon.svg",
+        "static/img/microsoft-icon.svg",
+        "static/img/oidc-icon.svg",
+        "static/img/saml-icon.svg",
+        "static/main.css",
+        "templates/approval.html",
+        "templates/device.html",
+        "templates/device_success.html",
+        "templates/error.html",
+        "templates/footer.html",
+        "templates/header.html",
+        "templates/login.html",
+        "templates/oob.html",
+        "templates/password.html",
+        "themes/dark/favicon.png",
+        "themes/dark/logo.png",
+        "themes/dark/styles.css",
+        "themes/light/favicon.png",
+        "themes/light/logo.png",
+        "themes/light/styles.css",
+    ],
+    importmap = "go.resf.org/peridot/vendor/github.com/dexidp/dex/web",
+    importpath = "github.com/dexidp/dex/web",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/dexidp/dex/web/robots.txt b/vendor/github.com/dexidp/dex/web/robots.txt
new file mode 100644
index 00000000..1f53798b
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/robots.txt
@@ -0,0 +1,2 @@
+User-agent: *
+Disallow: /
diff --git a/vendor/github.com/dexidp/dex/web/static/img/atlassian-crowd-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/atlassian-crowd-icon.svg
new file mode 100644
index 00000000..cd94e300
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/atlassian-crowd-icon.svg
@@ -0,0 +1,17 @@
+<!-- icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/bitbucket-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/bitbucket-icon.svg
new file mode 100644
index 00000000..72f4eb10
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/bitbucket-icon.svg
@@ -0,0 +1,5 @@
+<!-- icon markup omitted -->
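The embedsrcs list in the web/BUILD target above is what makes these templates, stylesheets, and icons available to //go:embed at build time. A minimal sketch of the pattern, assuming the vendored web.go embeds the same set of files; the webFS variable and ReadTemplate helper below are illustrative names, not the package's actual API:

    package web

    import "embed"

    // webFS is an illustrative name for the embedded file tree; rules_go
    // exposes every file listed in embedsrcs to this directive.
    //
    //go:embed robots.txt static templates themes
    var webFS embed.FS

    // ReadTemplate is a hypothetical helper that reads one embedded
    // template by path, e.g. "templates/login.html".
    func ReadTemplate(name string) ([]byte, error) {
    	return webFS.ReadFile(name)
    }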
diff --git a/vendor/github.com/dexidp/dex/web/static/img/email-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/email-icon.svg
new file mode 100644
index 00000000..10f0d8d0
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/email-icon.svg
@@ -0,0 +1,12 @@
+<!-- "Shape", created with Sketch; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/gitea-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/gitea-icon.svg
new file mode 100644
index 00000000..afeeacb7
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/gitea-icon.svg
@@ -0,0 +1 @@
+<!-- icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/github-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/github-icon.svg
new file mode 100644
index 00000000..5d607282
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/github-icon.svg
@@ -0,0 +1,5 @@
+<!-- icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/gitlab-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/gitlab-icon.svg
new file mode 100644
index 00000000..e8d408fa
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/gitlab-icon.svg
@@ -0,0 +1,53 @@
+<!-- "logo-square", created with Sketch; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/google-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/google-icon.svg
new file mode 100644
index 00000000..d667afdf
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/google-icon.svg
@@ -0,0 +1,16 @@
+<!-- "logo_googleg_48dp", created with Sketch; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/keystone-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/keystone-icon.svg
new file mode 100644
index 00000000..7a30aba1
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/keystone-icon.svg
@@ -0,0 +1,12 @@
+<!-- "OpenStack_Logo_Mark"; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/ldap-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/ldap-icon.svg
new file mode 100644
index 00000000..506dadc0
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/ldap-icon.svg
@@ -0,0 +1,12 @@
+<!-- "Combined-Shape", created with Sketch; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/linkedin-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/linkedin-icon.svg
new file mode 100644
index 00000000..409bad5e
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/linkedin-icon.svg
@@ -0,0 +1 @@
+<!-- icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/microsoft-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/microsoft-icon.svg
new file mode 100644
index 00000000..739c395a
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/microsoft-icon.svg
@@ -0,0 +1,9 @@
+<!-- icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/oidc-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/oidc-icon.svg
new file mode 100644
index 00000000..e2817b0a
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/oidc-icon.svg
@@ -0,0 +1,156 @@
+<!-- OpenID icon from the Open Icon Library (User:ZyMOS); markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/img/saml-icon.svg b/vendor/github.com/dexidp/dex/web/static/img/saml-icon.svg
new file mode 100644
index 00000000..506dadc0
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/img/saml-icon.svg
@@ -0,0 +1,12 @@
+<!-- "Combined-Shape", created with Sketch; icon markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/static/main.css b/vendor/github.com/dexidp/dex/web/static/main.css
new file mode 100644
index 00000000..7ab0316f
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/static/main.css
@@ -0,0 +1,148 @@
+* {
+  box-sizing: border-box;
+}
+
+body {
+  margin: 0;
+}
+
+.dex-container {
+  color: #333;
+  margin: 45px auto;
+  max-width: 500px;
+  min-width: 320px;
+  text-align: center;
+}
+
+.dex-btn {
+  border-radius: 4px;
+  border: 0;
+  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.25), 0 0 1px rgba(0, 0, 0, 0.25);
+  cursor: pointer;
+  font-size: 16px;
+  padding: 0;
+}
+
+.dex-btn:focus {
+  outline: none;
+}
+
+.dex-btn:active {
+  box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+  outline: none;
+}
+
+.dex-btn-icon {
+  background-position: center;
+  background-repeat: no-repeat;
+  background-size: 24px;
+  border-radius: 4px 0 0 4px;
+  float: left;
+  height: 36px;
+  margin-right: 5px;
+  width: 36px;
+}
+
+.dex-btn-icon--google {
+  background-color: #FFFFFF;
+  background-image: url(../static/img/google-icon.svg);
+}
+
+.dex-btn-icon--local {
+  background-color: #84B6EF;
+  background-image: url(../static/img/email-icon.svg);
+}
+
+.dex-btn-icon--gitea {
+  background-color: #F5F5F5;
+  background-image: url(../static/img/gitea-icon.svg);
+}
+
+.dex-btn-icon--github {
+  background-color: #F5F5F5;
+  background-image: url(../static/img/github-icon.svg);
+}
+
+.dex-btn-icon--gitlab {
+  background-color: #F5F5F5;
+  background-image: url(../static/img/gitlab-icon.svg);
+  background-size: contain;
+}
+
+.dex-btn-icon--keystone {
+  background-color: #F5F5F5;
+  background-image: url(../static/img/keystone-icon.svg);
+  background-size: contain;
+}
+
+.dex-btn-icon--oidc {
+  background-color: #EBEBEE;
+  background-image: url(../static/img/oidc-icon.svg);
+  background-size: contain;
+}
+
+.dex-btn-icon--bitbucket-cloud {
+  background-color: #205081;
+  background-image: url(../static/img/bitbucket-icon.svg);
+}
+
+.dex-btn-icon--atlassian-crowd {
+  background-color: #CFDCEA;
+  background-image: url(../static/img/atlassian-crowd-icon.svg);
+}
+
+.dex-btn-icon--ldap {
+  background-color: #84B6EF;
+  background-image: url(../static/img/ldap-icon.svg);
+}
+
+.dex-btn-icon--saml {
+  background-color: #84B6EF;
+  background-image: url(../static/img/saml-icon.svg);
+}
+
+.dex-btn-icon--linkedin {
+  background-image: url(../static/img/linkedin-icon.svg);
+  background-size: contain;
+}
+
+.dex-btn-icon--microsoft {
+  background-image: url(../static/img/microsoft-icon.svg);
+}
+
+.dex-btn-text {
+  font-weight: 600;
+  line-height: 36px;
+  padding: 6px 12px;
+  text-align: center;
+}
+
+.dex-subtle-text {
+  color: #999;
+  font-size: 12px;
+}
+
+.dex-separator {
+  color: #999;
+}
+
+.dex-list {
+  color: #999;
+  display: inline-block;
+  font-size: 12px;
+  list-style: circle;
+  text-align: left;
+}
+
+.dex-error-box {
+  background-color: #DD1327;
+  color: #fff;
+  font-size: 14px;
+  font-weight: normal;
+  max-width: 320px;
+  padding: 4px 0;
+}
+
+.dex-error-box {
+  margin: 20px auto;
+}
diff --git a/vendor/github.com/dexidp/dex/web/templates/approval.html b/vendor/github.com/dexidp/dex/web/templates/approval.html
new file mode 100644
index 00000000..1c037d2d
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/approval.html
@@ -0,0 +1,44 @@
+{{ template "header.html" . }}
+
+  Grant Access
+
+  {{ if .Scopes }}
+    {{ .Client }} would like to:
+    {{ range $scope := .Scopes }}
+      {{ $scope }}
+    {{ end }}
+  {{ else }}
+    {{ .Client }} has not requested any personal information
+  {{ end }}
+
+  <!-- approve/deny form markup omitted -->
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/device.html b/vendor/github.com/dexidp/dex/web/templates/device.html
new file mode 100644
index 00000000..674cbdc3
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/device.html
@@ -0,0 +1,23 @@
+{{ template "header.html" . }}
+
+  Enter User Code
+
+  {{ if( .UserCode )}}
+    <!-- pre-filled user-code input markup omitted -->
+  {{ else }}
+    <!-- empty user-code input markup omitted -->
+  {{ end }}
+
+  {{ if .Invalid }}
+    Invalid or Expired User Code
+  {{ end }}
+
+  <!-- submit button markup omitted -->
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/device_success.html b/vendor/github.com/dexidp/dex/web/templates/device_success.html
new file mode 100644
index 00000000..53b09ce5
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/device_success.html
@@ -0,0 +1,8 @@
+{{ template "header.html" . }}
+
+  Login Successful for {{ .ClientName }}
+  Return to your device to continue
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/error.html b/vendor/github.com/dexidp/dex/web/templates/error.html
new file mode 100644
index 00000000..418f76fb
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/error.html
@@ -0,0 +1,8 @@
+{{ template "header.html" . }}
+
+  {{ .ErrType }}
+  {{ .ErrMsg }}
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/footer.html b/vendor/github.com/dexidp/dex/web/templates/footer.html
new file mode 100644
index 00000000..5b6e2d65
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/footer.html
@@ -0,0 +1,3 @@
+<!-- closing page markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/templates/header.html b/vendor/github.com/dexidp/dex/web/templates/header.html
new file mode 100644
index 00000000..8cf744e5
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/header.html
@@ -0,0 +1,20 @@
+<!-- document head markup omitted; page title is {{ issuer }} -->
+<!-- opening page and theme wrapper markup omitted -->
diff --git a/vendor/github.com/dexidp/dex/web/templates/login.html b/vendor/github.com/dexidp/dex/web/templates/login.html
new file mode 100644
index 00000000..f432dd00
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/login.html
@@ -0,0 +1,19 @@
+{{ template "header.html" . }}
+
+  Log in to {{ issuer }}
+
+  {{ range $c := .Connectors }}
+    <!-- per-connector login button markup omitted -->
+  {{ end }}
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/oob.html b/vendor/github.com/dexidp/dex/web/templates/oob.html
new file mode 100644
index 00000000..ba84d817
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/oob.html
@@ -0,0 +1,9 @@
+{{ template "header.html" . }}
+
+  Login Successful
+  Please copy this code, switch to your application and paste it there:
+  <!-- code display markup omitted -->
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/templates/password.html b/vendor/github.com/dexidp/dex/web/templates/password.html
new file mode 100644
index 00000000..8c77b26e
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/templates/password.html
@@ -0,0 +1,35 @@
+{{ template "header.html" . }}
+
+  Log in to Your Account
+
+  <!-- username and password input markup omitted -->
+
+  {{ if .Invalid }}
+    Invalid {{ .UsernamePrompt }} and password.
+  {{ end }}
+
+  <!-- login button markup omitted -->
+
+  {{ if .BackLink }}
+    <!-- back link markup omitted -->
+  {{ end }}
+
+{{ template "footer.html" . }}
diff --git a/vendor/github.com/dexidp/dex/web/themes/dark/favicon.png b/vendor/github.com/dexidp/dex/web/themes/dark/favicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..bcd5f21bfdcd445325d013080c1c5bf339de11ae
GIT binary patch
literal 10415
[base85 binary patch data for the theme PNG images omitted]
zHmUgvyTzpp<2Zfx+vxXr&Yb=6k~%Ig3;$PYj{N1j&Uu$OlgiiMqzW1pl&2VnQyi80@LfpXT=f#_2Aao(e7wq3K ztvmV0qqv+jtuBe1q+lMg4zZ=YYnU!Fd2CUVISMC>DUN4FQ}hc07whd8@6=>2)Xl;S ztPfBZ6D1d#X*6puJ*s%bLh!W{MXY^(Qc{NKe%~NbhrbGgo|(DD`KN%qJp(*6g6OW&iKUFuF1T`-FDU&>*9ks|Pds-|7O^PYtJLHA3y37qaGN{SwtyI5~g8VwwO zP&-xvr#@$HDdN0Mrhc>&%VNLW@w_?EzkdVU!=o6SS{@0{Vy9}P-=H4iaEcS4H$KzK zx7Geu&vh5YhA+b(#a<>QK%EZjqUd5pbD>LeOe&ID(CR$&5%o%TA|rqI(cVl8_Q!*) z@$iR!2xPjh#7f1>e*688OrD(adu?RTPiT0lx69A-(My0tG8E2DR{ygN?z)UPpD1D~ zIHl1z&SFNHkPCtXQV3clTdxWj$2XvaEXlut;i@pFs=wbkI@@XKZ(uUil^e0J4tR4@ z5q=HbU}uXK2-O)Z5al6IBG-v9m*g!;FYMiqWRBE%ljibMfHGb3@{!|vXRMqP%G3|0 z;z_p>YkKBj6i2b}qW(YQ?ZyXBSHn$fkHorOz8*vJ zp{h#e!Bhhd6ps>^<1FYMt(CI$RQx{1%e=KrUT*&a6%A;~+`{TAy}8}vS0?4%GlY!y z!jLa{T;q7#SO)HnXpeufm!vi?5@*si=!@nq6o>cps-zT@CL!)@&iJ{pbMwk3A!pa86K34zM%f-z@ z%dPDCfek4o!I7!w*rb-sEs{5SKp8XC^DsEx@>z%SRg{&=D|Miva0qo|!35JCE$Lq! zeNbyFeQ7xR+lG{k7g;C$Y}p|p!SlBZ{Ci9s>+EY*-b{(9*wtcn-#vyp zuI#7`D_iZfvTI3>dl4b8{=OCl#`&G%aVfbGQHhwm*xQk52bkR8dB^4=F-Hn_p3UZG z*^xYpKh2_?7b}T|znV8CYjU6Je2%<4Uj!om-UJS=a`s1MfE&!jRB`zcIv0}(A)Q@k z6WV6d2U07;7U`FUMt+=8w6;7Z3yOl#FYECwP)Stcf4#{x{{!T%8J3srDG40DuJvnD z42HS^nOvGi#mw=m#jVrxN*JCe{=Qu9R$fBCi*F-(Mc5Iz(b>`3p?5r1uSp*Ub+`p# zAv;MDq&EYvc5SNY{(?x=?rNg4nO8ru?D`CMTpD2K^X11ZsDL|y=LH0rZ6}-$u#LXF z$YeFQrq`Obd>>GMX7()j#aniYf5juy17s5v(=F;eXYM1cT(i@^h&qJ1$y=2aH7o#S zN}Is)ZLhq#TyiE30+b1%aF=YjLp6QdmoP8zmAO+#{9b_LH5GOSvaSTPwamruc5F|P zs%wj#3IDvhR1V8FbHD8_)*Z)~#+hy+sqTnQ8$XXs^Nml1lNo+$m8Dh&qq&qjn(opk zG1gkRrHw@O+P*}$LJ_@FjvYY#?~`B9Hy^DO5i_CVU*gQsUIE~eKb_;B{J|yTryPm$r(XdaaI?S5C>nCP=D^qIcWWuI$-1&Z~B@7*j zxVN@=R&^;tyRM|5M4@Q3T37DV#{#bJ6^q2yBhOA+kE*6$HD(NJhhp)pWv;cZUv@&^ zAfKdULkcS&+{ZArT}`BpZ%Go~Rbm-*Zb-JT@1MPoUzp?2KvS>k$+_$~sw zn(LEZeIX{rybI_DfKl?gei6)yF}UDu{E8cUtoZ2mkhORqg<$l}i{HFbw)-D^@-kFp zCM<3YJ3krh+JI*PfJb*2_1(CF+xQY{DTX?eCi;cxFR9>=f=+nyH|iTs2ikLYS#ewR zZ~Qe!#7Q#N4yEv59?fkg9AJAUpwD2DFahqY4sM%*I(Lmrc20*SwmYVwk;sX=J}OR6 z)=pN}^X5+Hy)Ut9_9Q$0hikamDuE1%#B7KJac49{H^z11n7SlW2tz(2EqD{I@=$kA zc2Z*0pM4a=bk8UX9;Sa8^Og`9auI76VrH++M#C3{Xe!Vk9Nq=Th!J4!$3xq7Eeu@ z;{~uOb5wJ%ToHFm0G-Y29)maEw|-V!3yi)~HiY;|iDNajDYiH=un~AbKRc0E>;n>} zs07ijAu2ZhY?Hh}+f*mOeZDS|5&s&4r*4b(yW;d#b}ExO?^j{$?e{NZatUBpbYv77 zAli4PU;0dSOenYkr(dga0Ri*75g8@0O;lve&Ku?WhNt=`NTF9&YmZrCTnD}%;oWio z`cI%v$@0P5^i|{_&3-FhvK!S9I$)=3l2x9K#P1D7MS-@@f9ZgC9i@S`Q$os!dA5RJ zMrva;*yB!(P+-Kn+Q^#+7*9U%97{qLfyocV5p__XERcO_NDaA&$iR3_`|707Jrd_p8k|A;jQ}a z?1msumLxauL3<1fwD~(zmnb{4J?ft9_vr1+Do-+S(){zkwVn9M^SX)SP!+oi23MaC zhPsr(niNt3`V}U1#j7GwEyK-sKnTR|F&mw*2sKeK<66@htbUT*S_KM0K)Bc$J*E5g<1LIr|K{RfI|ZxXg~dNxyyg_~@d!51A-OG)Aw5JP#r<$|jUwPo)M(?z7U3V>gLIqliZuPz8T6E?VLj=Dqh=kU zWB=Y%*2cQj%-RrQ6k1Jnd|$o#eJncx1p=n3sY+s2Cc=;Hc1{o>{UtwWghoniV*YIm zWdts)5L0fHQZ#whk}~u&`|Ci+j&1ORB1PkKPc=Jz;Cdh3{iZXsnT~)i2UuXD`-9f) z1B*;41a`huN>`bS$vIXvcFR9_JFbIvwW#LY6Pe0nZTw7qhDW31vQro9i%>jMpu2g- zN?QoC@`z9-tp+Pvag4f6#wzlj#)xd-Z(;gd9*$tm{<`jw;33-)+~*FYs1tI-xs>*6 ze*7^YQj616W@Swo;z9tUJXm-*7(pzyXXrbWp3)Upu5~LmQq>oG0T)SPE4>8Azpdpj1`yTlak!27RgOS-ZPB6lu){W03FDl zHBt@ke?XO`xs;?HjPkNuK```~22}(H=ohK zVoe0di|a^_0>n5MfjHR9aeyT#?LD=yp5L$GuglJ&f}TNvIhLbptzy;~5@c%MDpJvw zuo{TN4nq(8*a}vCCQyQ$udt^T`~0ws;!r7g#B8HzKH+eMlNSi$oPiw-c8UF~Dtwy+ zJWHiV=$9gOm$xChXzXY7XIt#WFAZa=kqdqx47cN($t2B`>niY2Bm!XLHhKdvtuq&SaDAK_@dZ`Zfqv&LtP<=gD zhZlpPE{>NUc4t7?jUR%Dhmz=R3vDfgo&#j(;s$&`J49hlZwn01ya@w z$UQH=^06KvC*Un12g)L}irE7|mH3!8Dw1>OcAJ@X&3lIFC9Y6vX_rXDb7+wdij?FH zR}|X>avIKyldukP4wR7qv?tBE`6p}4JBvdAf!f)C;WF=xu}7aPs9Ij z1!y)KwzK?=h-+BgMT23rNsC&#g>BMgu0ES{9{HJ6{{?`~K6`A&JInP|ZEHBLiKYq7 znSFiC$?2gbTuiYKyuUwWF{|CwMm*HnpwqQ*)G@LDM&FN{!_vgx=35&t3hYzF6T&}R 
zOdVqVN#X?s9`k)D9`{tBKo&y?Sd>8&PNLN4%IvsX65bW5m+@_#=@!{QdapJt`~0m3 z3|bS=!TKrA{6z3&%!`)}+C3Yj8pmR|HYQ_4ziAmpYGAcrEi$|e{XKy`e$4{hRan8z z>ZcLyc``Yi3*l<_EQpqrPnAFnfX z;>>=SKA{hkfWefQ5wg(FGKZAJ&M@vsz^B|*U1hpiNvV>Bnnw<{gSmWd4CuD zDT=N(do+lVo9X;(h+!u1@~gWON8nPw;(|dpWZ%}R(b8~l-fFO}Q~4D7KY!(bV;*Jp z5y-{c0fixPsV%2*qN}l!2jDo!MkAL5Tw->Pv?LVw&!l@aGlaMV7{w+dI^?9JKdc|H zh-a5hskEgnHd0Z$Nn-l^9Wm%y3o~^vCqzmX)qU9OZZHPj^Vpy}11LDh22?~R2sSAD zLg~g|bA&adQH^-^-~HFt^ME(JVUpvNo_$PKS`_~p6W3E3MMxgs=HRfWPma=g#{f?c z7FY5SJ6vLX;@V>5ufeRCs?;sVq3tU;LJbYnckdrS*jJcV+xLdO3ueN)P>F|Y|D7YA zTBE8s&V@Ua~lRW7-R=6LMPDVIjgEKrcNiWOA5|2gx8+b`YAw!J-zL^^GV6&s0U zaFmc&GWh-RPg3d1bz3FoQ`hwl0K0eIjPs2suV8gIGnA$jRgTZ?RB-?9DI5`^Sn_k9 zJ3@bk!F&MtrA;QQm#W&G;8rM|J~9Ng*ulBbhciDz#MM5^oTW+J0L4O_o5=+LJ~G+X zW_{vQia0QaU4+Z{ZY29T>!aw3 z%VoY92Y2cK$!8u?sV!cjicLxsXbQk*!(@((G_5b?GS}RV;vJOLz;YqFXXh}4X5Isz zTXJU#iA>{Ll%4W_>p|Ar28K!aAnC#Ol@#7wO%Z8N^>s1t_X7Ip(C){m>MPco09PwGVbFaoc7_DmCzpW{{qGzN`bH3XCiX8kc*o3_nRJQM`N=)TO- z*&N$q9scC~fgy&MIPZiwCS;-kk}*46>;W|1c}oFI*r!!UD1fI8o`&7*NS)-c9M@UC z_xWqm2Ed@?ilc^H)A5y7T`l`wx!M?p2$*R_BP_9>s`10@mkap{4hn8%NJJ?oW{)ih zq8*azooW*RX+^82!+v+|!fbgMpZEyD`l8fn1TAv|muxmO{;CiKmr}~Ol`+?T9M#}; z(ZN$4wm<-%DJjoiW}Tc|yZG$O@`+3A^F#je#W9goeTCiAw z?YCuP`+>c*Y8B$mP0nvNaXwk;cRH)Dt!z;2230Cf-}Tb`<0j2Zl8%9=zy7rQc4whb zi*HiN_tO6!DDtwtTq;r!^l4NpaMeOaqdLB93urqcOfbSfF9tplKvBDBJpL$@{LeCG zPIu!nE9u%zK9^kFhPmld+1e?RQzaRH&LIo;9l~^IB;4cXx5NNe3 z!jo+6sO1j8GEYb2P(XaBC&0tWe|B7m3Pp;a`i!MW84`&4e>){XYcwuUg@H6SKXnpCTy_VT!hkRsM=;i z7W?fhqMIEL3CRgs>N0OxVyB8c0KyoF#g#kFkfBhT5+(fG$T+f-_*Y-9#rJ{m=~dZ0 z#?byCMrBgZJk=^0R-&M1=@uep=l!|!^DIw{Z}-9tf~T!TmG^DyCb@`UD3DUp&KxvU z^G@|VRR;(z(AF>dF`AyVbgeq# z5p!3}5_AT){DS{x$I-Q+mxMxBfWhD4b}!Z6dJn}Cz|^1Le`U81PkqiL9w&abPS88n zn}Msn11?nFHo9 zr}r3p@~v8J;k?WRgbCv-GKk<=Z&A6(7fh3fk3wQMfx0@BeJ}Tr$_C{{>BTM;9GDHf zeTna{H%$+jDKNOse#AM28Q;4OY-K%?OeQ;+*>W$xGym2=*!2-KJO7Qr67}8M+ye&Y zKodWakUrgy8(aYC3KqoWrOr!=iiA(bnk=ee_2;TalDzT7AAbx23TMqK`mrWk6E~dI z`d2dujq3%kKUF~z^kQrVN}M?d(7lsfW)QS8_O81sj{-zh|G>aRf2fgm(^kZ~e=QcW za=hF_ooDegRe0DX2|u()*&A(rD2RHT$T-^j!%8K?dq6Wbskk+-es!HL?XTWysO+Fy z=E@&aS+s9kHLG@6W62-I1S>FI4)RW~*lZiHCgHK$u1Ti4XM(5ztZwt8XdcrhWzCNo ziA@xUC|#6y&K+vnOM=D`^P$cHN~^4h;9%OC#n@oWAz=;aEB>pWRo;VrjFY|sn<|VQ z482i^13g#QekLsW_+SMAg41)`pG0$y0xBL6eZfByS=rV z->YIW4;l7^T_vSO3G$9<#{EF_+w~gwwyBws3S@2`Jav?j_}EUR$pb|rv_nI_04}=# zUUi^cH$u>4hYT^Q-VscKpGK(>N4^1y1;GlMY7mPO!+H!kMPiVeVUyW65;TlSz_xSi ztWcaI(te?dN9|vBACu+$*_Q4G9IpuS%{zo0viUclZ6B3WjNk`_F#Q-OlwTfd?`S-? 
zkErP(%ln*PbCobi6gdwKRNikX_s6eXdXl<2TmIL{gG$uaik*ksoEj0%YAEjx4Nud>*+ z3{-Jzks{S?xoyN&cX0uXaY+xS1_-Rsn_vj-WAZ-F1v1OWlNHz4dh4q2O~O2K6o{N{ zqk-rgNIDDv;a|hWo&{#>?7mkR z;51-ub1OjIB23>#hO}fQPh~R7ajMX&+TvWiQj(?t?IurvrMYF70;&M7eL-{QDq(Be zsN-{vJ(jE-l-6Uh^953i1WoZmTd#WD9VeQ?izH%=dsuK4^s1pJK2G=DehmT*ME}LR_`TS#`&z7`d`s&~ zE_0DrMK5P}uS=itcEOw?V%TTLiYu-Isl0RNz@jksUY!o^ z?iYEYLcAr9-2j2$g6#%2qGk1;MlcRyV{;K~!$dJFIpoTO7G1-=LQh-fcG;+p{>;AbT8agXT;B1%^_SPDb!O zfg;26JJjZC>#LP!_)W-`?2&QS;*tK`IalF7HF)_q2`!@QoM%Wh#$DDJvr`l?#S0U> z6xCX#Vs*?bc)f9H>}*dbhfsKzKyR)CVa;xiw5fp99t0neO)2fHEo+Dm{SQ>=r8igv zc|k9cbG%YbBA^;TN%E(BtB;kI#zz64R*_=RX~3GzIfI43WFg;-@ySW zV>4+H-Bi+Px?yq3bTWtN#?txk<~GRgOjaSmzTlC95V=VjK6uCyt$%Jc3{^IhInV1( z#@)@*ObXs1z$NufE`+o10yc=i`*gxnCD@sri&hsit8qZ-0)9myGJ@hTfOJ0A?oD<~ zr*9;-d#mxm#ZKI_K#l8n9!u2y@)-Shg$mpQmSZoQZa-(@Y=^j3d#Z7q4g+lM1MOc( z2BNQ%{Y~uux_OR}11MWD(%sgbj}t$)<7;%d1B`6Ra8=o)Ne#&K_Rot)?&NdvVoM0J z9TrPFw3f$lm|5!79fGGw0L+b@DbDmPSE%0Z|g-qxHZ|S9{A|lY_Diz7kxybyb~4lXffEG0R-`INFe&_<)4dybTrKbEE{_Xfe>9>{FvEhu3Xzs zG+|rWg63$TvZSNsX?4kOD*uUCG-j2gg~VD`A{*E zIwXI@twN0B>_e1Dd*g&Aaz2BG;CZH$qKk4PH4GEH%~-iPEHXHeCwP-x$fIiA6YYJvkC z%2&gKXh$GRw(BPe)tk2zo#3&bm%kndA`YQLIiI`;(nLjxo>=)^0L{IWr@W9n?aWc! zqw+K)ZQ4#Fo;mV@Le|~dyjd$EE)}prjZAOilh!TBAV&Sq$3`ai?g6$hL*FK=1xszU zRT5N1tmJc4da=Aq$G&D*Yy;@?Oixq4*gkak@UCCXd&mYOSl6k$9y>lbn4Gn4C>8g9ji#*4ddK^i)+Wc`{H z7Z1yVGa=XY->9bV)t-k!0cJ~~tC8zxp{@hbSYbD_wCew-m$QCo>i_=u=v3Z<2t!Ir zK)Sm_7$qeP5R_(g3P=k|my8ktCHJONL>Q>RMvd-Lx(3oU_+H+h|KYn|@9w#;d(M5; zd7bBjI63=UREKv7{5(~iNN(^ny+68*ujzI&Q)Qr$ktG^E6+e4Vey`x$h*{D8A)=V?fA*B3nhQw;R6@0!oE_Ka@;$K8 zw2zd=-#ET+9Mra&=6+0VywAcVHkE}8UAZ$s)gut{sKw7t-);Nf;bpA%R`|JCZs&+u zD)3o!GImGfR2oR7A6g_2jKoX9FHGKo#tAMY!pshliW+|YC;)&^{$9tHKS%s{_~!2y zb{MZ%&wY)>nd0GP2NjB6qs>4#nfEbY=ai5&HNkboblDF?)^u2v8o!DPc|qk#QzXYW zF;`*RIWxv)f4_Z*h_KUr_CZ>%3s`VI6?N65h=mhVg}e(TUIGB@9vF@GQ@B zD?B>AF11#8IVplndg%RntNCIy@?LUrLGsR#C4X{YsU&SF?zZhu}Qo56j?5kxZ2^ znK%fub_Y=xG1SajLpg*t6%{EdNrgH`+Jnrj)-)fOyuFg?PoYE9eIy(ajPG=OLk;qILm9HtWQ?rwzg^b=^G$w| zQw;RG`0Pn;#{m0eC1DEhu}2>H1;Ci2wZ~+}>H%#7#3kO)UkQ=LA&qxu_yiC87$H8k*#&+~kndYr zDR=HL%S_@KvPZlhT4#jUmzL<|RTg730o@e~9)i~R>%Z3Emxp!{%#%q9e5A(eD%(2u zvg+PhUFLW>pG{4FUI3r~jrypH>C;KV){93Om-z`0)v)1LGQlprHl4nmZJxS>Wu`^I zj?S6P$AQ3PtgLRHTNESYi=x? z1mgPBM&fY2J=9UCar){x%mW&Xr(NHp_TL`1O7Z6Y_J0S8q4 z60E#Hl?XinnmfJnb&p^kT|{(;dIkANvZCiz)T|K2p9yZiNflULn2BWAylI!tJw-MCwLz(ue7>`9%I8H6LW&J}RMS;4p??N2eLEaxX zc(%Z@K&5$8G6|{x@m}B*3eAn6MA#)B`7Coyn-(Jjh0;gXI z9&4;hCcA28#NHGXt<(iS2%;CV+Bcgi$K1`i%w`*TsVY@_>W~PQis}{zaG7vwqB|Rx zZT5!fBp*rHk!6<dfDhqO1Mq!RsMD=B8HjhFxBJDGBY(wM0*S!SLuj`(mM>eAwTr6dWCF**OeLum1$!_*f^BqMF zsVbMy!vSOdJd>>F&#!3|w|(r?9>-G`o~d;C4NWaT>W5i5HGSN!rab)CG86s^*REaQ z5;Ec-pVRqa6cq|+u)46Jv-(1%kY7_;DDIGD=27``9sgtwQl_I2_Q@QbgGk?nK$-Z} z00uIK`*zHOZGzB;mmh_yvVkx!{*ugKrOkd7F*oKr%^_EFcqp59#$D;RUuDmnvT4z ztJ{k16}Z^^kj&2<3jD%@S~yb^f6#{L_}mETBlhh+}TcoAXIeNPm^blX<#J z;vTG);PGXA--FqqZ<^Llu}j|pQO`3-`NH zpiNp20vn~QdULwTIbqJhn> zHVSEX^9m&yoakd{X>QXIrq>TAH_;ioNj4=-BP=a@IOb}dqWl7@Sp5;V!y9Sz_wLBb zjLvFTB;`_iZ_^NEI86`s%@Zq@>zUU~n|?LbIJq?tFgyt`g41(fu&3DvY;09)IkNrZ z${j9zw2K}L+0M6y;GxPPVO8m~^0^>dMAiqI0`jx&lGc&Jy!J;lzd~@|NA~_Y z3}1VW6CicfP&7!lx8~VR5+Bnt1pn=@7%a8(aj^Ui3uJ(K{v7fy=Kb`N5MhdZj`H)G zT53}nMAxTa5)0`h(GKGrZ}i`6V*(8AEdbzJoYvR5As;B)T`0%@ai(WEuO{+jcf3J;b7^NFCHLeZO8T$9<434kZJ`Wf|yNYId6XpGOc1Sw5 zI=<_9TVJi6fb+0s#)vom3HA^*7KbKQyg__I0H(ePdobgo)+rj$aG z?CB!0XZy;+$8!ut+w+xs4Sw5@jD)ry@}NFG!R? z99)z)OkIYu>aKcW)Ka_LKBV<~H!fsd0Oe#qO|Oh-tRj*wEzWgS5gEDJZ9@1|uD|M; zC;7LubGyH*m9hSK)unx&(+2@tVbF0QX^yEThzGAw%Y}gtf#Qs4Z6K5Up-DK22zZn! 
literal 0
HcmV?d00001

diff --git a/vendor/github.com/dexidp/dex/web/themes/light/styles.css b/vendor/github.com/dexidp/dex/web/themes/light/styles.css
new file mode 100644
index 00000000..2d920571
--- /dev/null
+++ b/vendor/github.com/dexidp/dex/web/themes/light/styles.css
@@ -0,0 +1,113 @@
+.theme-body {
+  background-color: #efefef;
+  color: #333;
+  font-family: 'Source Sans Pro', Helvetica, sans-serif;
+}
+
+.theme-navbar {
+  background-color: #fff;
+  box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2);
+  color: #333;
+  font-size: 13px;
+  font-weight: 100;
+  height: 46px;
+  overflow: hidden;
+  padding: 0 10px;
+}
+
+.theme-navbar__logo-wrap {
+  display: inline-block;
+  height: 100%;
+  overflow: hidden;
+  padding: 10px 15px;
+  width: 300px;
+}
+
+.theme-navbar__logo {
+  height: 100%;
+  max-height: 25px;
+}
+
+.theme-heading {
+  font-size: 20px;
+  font-weight: 500;
+  margin-bottom: 10px;
+  margin-top: 0;
+}
+
+.theme-panel {
+  background-color: #fff;
+  box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
+  padding: 30px;
+}
+
+.theme-btn-provider {
+  background-color: #fff;
+  color: #333;
+  min-width: 250px;
+}
+
+.theme-btn-provider:hover {
+  color: #999;
+}
+
+.theme-btn--primary {
+  background-color: #333;
+  border: none;
+  color: #fff;
+  min-width: 200px;
+  padding: 6px 12px;
+}
+
+.theme-btn--primary:hover {
+  background-color: #666;
+  color: #fff;
+}
+
+.theme-btn--success {
+  background-color: #2FC98E;
+  color: #fff;
+  width: 250px;
+}
+
+.theme-btn--success:hover {
+ 
background-color: #49E3A8; +} + +.theme-form-row { + display: block; + margin: 20px auto; +} + +.theme-form-input { + border-radius: 4px; + border: 1px solid #CCC; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + color: #666; + display: block; + font-size: 14px; + height: 36px; + line-height: 1.42857143; + margin: auto; + padding: 6px 12px; + width: 250px; +} + +.theme-form-input:focus, +.theme-form-input:active { + border-color: #66AFE9; + outline: none; +} + +.theme-form-label { + font-size: 13px; + font-weight: 600; + margin: 4px auto; + position: relative; + text-align: left; + width: 250px; +} + +.theme-link-back { + margin-top: 4px; +} diff --git a/vendor/github.com/dexidp/dex/web/web.go b/vendor/github.com/dexidp/dex/web/web.go new file mode 100644 index 00000000..0c7e9873 --- /dev/null +++ b/vendor/github.com/dexidp/dex/web/web.go @@ -0,0 +1,14 @@ +package web + +import ( + "embed" + "io/fs" +) + +//go:embed static/* templates/* themes/* robots.txt +var files embed.FS + +// FS returns a filesystem with the default web assets. +func FS() fs.FS { + return files +} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml new file mode 100644 index 00000000..bfc42120 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/BUILD b/vendor/github.com/felixge/httpsnoop/BUILD new file mode 100644 index 00000000..e3698a19 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "httpsnoop", + srcs = [ + "capture_metrics.go", + "docs.go", + "wrap_generated_gteq_1.8.go", + "wrap_generated_lt_1.8.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/felixge/httpsnoop", + importpath = "github.com/felixge/httpsnoop", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 00000000..e028b46a --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. 
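The `web.go` file vendored above exposes dex's embedded `static/`, `templates/`, and `themes/` assets as an `fs.FS`. A minimal sketch of serving that tree over HTTP follows; the handler wiring, port, and `main` package are illustrative assumptions and not part of this patch (dex mounts these assets through its own server):

```go
package main

import (
	"log"
	"net/http"

	"github.com/dexidp/dex/web"
)

func main() {
	// web.FS() returns the embedded static/, templates/, themes/ tree.
	// http.FS adapts an fs.FS to the http.FileSystem interface (Go 1.16+).
	http.Handle("/", http.FileServer(http.FS(web.FS())))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```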
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
new file mode 100644
index 00000000..2d84889a
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -0,0 +1,10 @@
+.PHONY: ci generate clean
+
+ci: clean generate
+	go test -v ./...
+
+generate:
+	go generate .
+
+clean:
+	rm -rf *_generated*.go
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 00000000..ddcecd13
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,95 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop)
+[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	m := httpsnoop.CaptureMetrics(myH, w, r)
+	log.Printf(
+		"%s %s (code=%d dt=%s written=%d)",
+		r.Method,
+		r.URL,
+		m.Code,
+		m.Duration,
+		m.Written,
+	)
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either.
It's possible that it is
+still missing some interfaces provided by the go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
+
+However, hopefully the explanation above has sufficiently scared you off rolling
+your own solution to this problem. httpsnoop may still break your application,
+but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8         20000    94912 ns/op
+BenchmarkCaptureMetrics-8   20000    95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 00000000..b77cc7c0
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+	// Code is the first http response code passed to the WriteHeader func of
+	// the ResponseWriter. If no such call is made, a default code of 200 is
+	// assumed instead.
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
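+//
+// A minimal sketch of accumulating metrics across two handler invocations;
+// w, r, h1 and h2 are assumed to be in scope, and this usage is illustrative
+// rather than documented behavior:
+//
+//	m := Metrics{Code: http.StatusOK}
+//	m.CaptureMetrics(w, func(ww http.ResponseWriter) { h1.ServeHTTP(ww, r) })
+//	m.CaptureMetrics(w, func(ww http.ResponseWriter) { h2.ServeHTTP(ww, r) })
+//	// m.Duration and m.Written now hold the combined totals, since both
+//	// fields are added to on every call rather than reset.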
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 00000000..203c35b3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 00000000..31cbdfb8 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 00000000..ab99c07c --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
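+//
+// A minimal sketch of a custom hook; the byte counter below is illustrative
+// and not part of this package (w, h and r are assumed to be in scope):
+//
+//	var written int64
+//	ww := Wrap(w, Hooks{
+//		Write: func(next WriteFunc) WriteFunc {
+//			return func(p []byte) (int, error) {
+//				n, err := next(p)
+//				written += int64(n)
+//				return n, err
+//			}
+//		},
+//	})
+//	h.ServeHTTP(ww, r)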
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000..e256a31e --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000..0e9d6edc --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/BUILD b/vendor/github.com/ghodss/yaml/BUILD new file mode 100644 index 00000000..c4e0f1aa --- /dev/null +++ b/vendor/github.com/ghodss/yaml/BUILD @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "yaml", + srcs = [ + "fields.go", + "yaml.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/ghodss/yaml", + importpath = "github.com/ghodss/yaml", + visibility = ["//visibility:public"], + deps = ["//vendor/gopkg.in/yaml.v2:yaml_v2"], +) diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000..7805d36d --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000..0200f75b --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. 
This error will occur in `Unmarshal` as well, since you can't unmarshal map keys anyway (struct fields can't be keys).
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 00000000..58600740
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
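+//
+// For example (illustrative, not exhaustive), given:
+//
+//	type T struct {
+//		Name string `json:"name,omitempty"`
+//	}
+//
+// typeFields records a field named "name" with tag and omitEmpty set,
+// which this package then reuses when matching YAML keys to struct fields.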
+type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
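+			// (Editorial note: nextCount tracks how many times an embedded
+			// type appears at the next depth. Only the first occurrence is
+			// queued below; a count above 1 lets the next round append a
+			// duplicate so the annihilation logic can detect the conflict.)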
+ nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
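+	// kelvin (U+212A) and smallLongEss (U+017F) are the non-ASCII runes that
+	// case-fold to the ASCII letters 'k' and 's'; see foldFunc below.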
+ kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. 
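+// For example (an illustrative note, not vendored text),
+// parseTag("name,omitempty,string") returns ("name", "omitempty,string"),
+// and parseTag("name") returns ("name", "").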
+func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 00000000..4fb4054a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). 
So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce. Else return normal.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value is between 32-bit
+				// and 64-bit. Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Stolen from go-yaml to use the same conversion to string as
+				// the go-yaml library uses to convert float to string when
+				// Marshaling.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+ jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/BUILD b/vendor/github.com/go-asn1-ber/asn1-ber/BUILD new file mode 100644 index 00000000..a0ec1c8d --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/BUILD @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "asn1-ber", + srcs = [ + "ber.go", + "content_int.go", + "generalizedTime.go", + "header.go", + "identifier.go", + "length.go", + "real.go", + "util.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/go-asn1-ber/asn1-ber", + importpath = "github.com/go-asn1-ber/asn1-ber", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE new file mode 100644 index 00000000..23f94253 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/README.md b/vendor/github.com/go-asn1-ber/asn1-ber/README.md
new file mode 100644
index 00000000..e3a9560d
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/README.md
@@ -0,0 +1,24 @@
+[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
+
+
+ASN.1 BER Encoding / Decoding Library for the Go programming language.
+---------------------------------------------------------------------
+
+Required libraries:
+   None
+
+Working:
+   Very basic encoding / decoding needed for LDAP protocol
+
+Tests Implemented:
+   A few
+
+TODO:
+   Fix all encoding / decoding to conform to the ASN.1 BER spec
+   Implement Tests / Benchmarks
+
+---
+
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go
new file mode 100644
index 00000000..4fd7a66e
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go
@@ -0,0 +1,620 @@
+package ber
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"reflect"
+	"time"
+	"unicode/utf8"
+)
+
+// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
+// no limit.
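+//
+// For example, a caller decoding untrusted input might tighten the limit
+// before reading (an illustrative sketch; the 64 KiB value and the conn
+// reader are assumptions, not library defaults):
+//
+//	ber.MaxPacketLengthBytes = 64 * 1024
+//	pkt, err := ber.ReadPacket(conn) // conn is any io.Reader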
+var MaxPacketLengthBytes int64 = math.MaxInt32 + +type Packet struct { + Identifier + Value interface{} + ByteValue []byte + Data *bytes.Buffer + Children []*Packet + Description string +} + +type Identifier struct { + ClassType Class + TagType Type + Tag Tag +} + +type Tag uint64 + +const ( + TagEOC Tag = 0x00 + TagBoolean Tag = 0x01 + TagInteger Tag = 0x02 + TagBitString Tag = 0x03 + TagOctetString Tag = 0x04 + TagNULL Tag = 0x05 + TagObjectIdentifier Tag = 0x06 + TagObjectDescriptor Tag = 0x07 + TagExternal Tag = 0x08 + TagRealFloat Tag = 0x09 + TagEnumerated Tag = 0x0a + TagEmbeddedPDV Tag = 0x0b + TagUTF8String Tag = 0x0c + TagRelativeOID Tag = 0x0d + TagSequence Tag = 0x10 + TagSet Tag = 0x11 + TagNumericString Tag = 0x12 + TagPrintableString Tag = 0x13 + TagT61String Tag = 0x14 + TagVideotexString Tag = 0x15 + TagIA5String Tag = 0x16 + TagUTCTime Tag = 0x17 + TagGeneralizedTime Tag = 0x18 + TagGraphicString Tag = 0x19 + TagVisibleString Tag = 0x1a + TagGeneralString Tag = 0x1b + TagUniversalString Tag = 0x1c + TagCharacterString Tag = 0x1d + TagBMPString Tag = 0x1e + TagBitmask Tag = 0x1f // xxx11111b + + // HighTag indicates the start of a high-tag byte sequence + HighTag Tag = 0x1f // xxx11111b + // HighTagContinueBitmask indicates the high-tag byte sequence should continue + HighTagContinueBitmask Tag = 0x80 // 10000000b + // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte + HighTagValueBitmask Tag = 0x7f // 01111111b +) + +const ( + // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used + LengthLongFormBitmask = 0x80 + // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence + LengthValueBitmask = 0x7f + + // LengthIndefinite is returned from readLength to indicate an indefinite length + LengthIndefinite = -1 +) + +var tagMap = map[Tag]string{ + TagEOC: "EOC (End-of-Content)", + TagBoolean: "Boolean", + TagInteger: "Integer", + TagBitString: "Bit String", + TagOctetString: "Octet String", + TagNULL: "NULL", + TagObjectIdentifier: "Object Identifier", + TagObjectDescriptor: "Object Descriptor", + TagExternal: "External", + TagRealFloat: "Real (float)", + TagEnumerated: "Enumerated", + TagEmbeddedPDV: "Embedded PDV", + TagUTF8String: "UTF8 String", + TagRelativeOID: "Relative-OID", + TagSequence: "Sequence and Sequence of", + TagSet: "Set and Set OF", + TagNumericString: "Numeric String", + TagPrintableString: "Printable String", + TagT61String: "T61 String", + TagVideotexString: "Videotex String", + TagIA5String: "IA5 String", + TagUTCTime: "UTC Time", + TagGeneralizedTime: "Generalized Time", + TagGraphicString: "Graphic String", + TagVisibleString: "Visible String", + TagGeneralString: "General String", + TagUniversalString: "Universal String", + TagCharacterString: "Character String", + TagBMPString: "BMP String", +} + +type Class uint8 + +const ( + ClassUniversal Class = 0 // 00xxxxxxb + ClassApplication Class = 64 // 01xxxxxxb + ClassContext Class = 128 // 10xxxxxxb + ClassPrivate Class = 192 // 11xxxxxxb + ClassBitmask Class = 192 // 11xxxxxxb +) + +var ClassMap = map[Class]string{ + ClassUniversal: "Universal", + ClassApplication: "Application", + ClassContext: "Context", + ClassPrivate: "Private", +} + +type Type uint8 + +const ( + TypePrimitive Type = 0 // xx0xxxxxb + TypeConstructed Type = 32 // xx1xxxxxb + TypeBitmask Type = 32 // xx1xxxxxb +) + +var TypeMap = map[Type]string{ + TypePrimitive: "Primitive", + TypeConstructed: 
"Constructed", +} + +var Debug = false + +func PrintBytes(out io.Writer, buf []byte, indent string) { + dataLines := make([]string, (len(buf)/30)+1) + numLines := make([]string, (len(buf)/30)+1) + + for i, b := range buf { + dataLines[i/30] += fmt.Sprintf("%02x ", b) + numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100) + } + + for i := 0; i < len(dataLines); i++ { + _, _ = out.Write([]byte(indent + dataLines[i] + "\n")) + _, _ = out.Write([]byte(indent + numLines[i] + "\n\n")) + } +} + +func WritePacket(out io.Writer, p *Packet) { + printPacket(out, p, 0, false) +} + +func PrintPacket(p *Packet) { + printPacket(os.Stdout, p, 0, false) +} + +func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) { + indentStr := "" + + for len(indentStr) != indent { + indentStr += " " + } + + classStr := ClassMap[p.ClassType] + + tagTypeStr := TypeMap[p.TagType] + + tagStr := fmt.Sprintf("0x%02X", p.Tag) + + if p.ClassType == ClassUniversal { + tagStr = tagMap[p.Tag] + } + + value := fmt.Sprint(p.Value) + description := "" + + if p.Description != "" { + description = p.Description + ": " + } + + _, _ = fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indentStr, description, classStr, tagTypeStr, tagStr, p.Data.Len(), value) + + if printBytes { + PrintBytes(out, p.Bytes(), indentStr) + } + + for _, child := range p.Children { + printPacket(out, child, indent+1, printBytes) + } +} + +// ReadPacket reads a single Packet from the reader. +func ReadPacket(reader io.Reader) (*Packet, error) { + p, _, err := readPacket(reader) + if err != nil { + return nil, err + } + return p, nil +} + +func DecodeString(data []byte) string { + return string(data) +} + +func ParseInt64(bytes []byte) (ret int64, err error) { + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = fmt.Errorf("integer too large") + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +func encodeInteger(i int64) []byte { + n := int64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = byte(i >> uint((n-1)*8)) + j++ + } + + return out +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +// DecodePacket decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. +func DecodePacket(data []byte) *Packet { + p, _, _ := readPacket(bytes.NewBuffer(data)) + + return p +} + +// DecodePacketErr decodes the given bytes into a single Packet +// If a decode error is encountered, nil is returned. +func DecodePacketErr(data []byte) (*Packet, error) { + p, _, err := readPacket(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + return p, nil +} + +// readPacket reads a single Packet from the reader, returning the number of bytes read. 
+func readPacket(reader io.Reader) (*Packet, int, error) {
+	identifier, length, read, err := readHeader(reader)
+	if err != nil {
+		return nil, read, err
+	}
+
+	p := &Packet{
+		Identifier: identifier,
+	}
+
+	p.Data = new(bytes.Buffer)
+	p.Children = make([]*Packet, 0, 2)
+	p.Value = nil
+
+	if p.TagType == TypeConstructed {
+		// TODO: if universal, ensure tag type is allowed to be constructed
+
+		// Track how much content we've read
+		contentRead := 0
+		for {
+			if length != LengthIndefinite {
+				// End if we've read what we've been told to
+				if contentRead == length {
+					break
+				}
+				// Detect if a packet boundary didn't fall on the expected length
+				if contentRead > length {
+					return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
+				}
+			}
+
+			// Read the next packet
+			child, r, err := readPacket(reader)
+			if err != nil {
+				return nil, read, err
+			}
+			contentRead += r
+			read += r
+
+			// Test if this is the EOC marker for our packet
+			if isEOCPacket(child) {
+				if length == LengthIndefinite {
+					break
+				}
+				return nil, read, errors.New("eoc child not allowed with definite length")
+			}
+
+			// Append and continue
+			p.AppendChild(child)
+		}
+		return p, read, nil
+	}
+
+	if length == LengthIndefinite {
+		return nil, read, errors.New("indefinite length used with primitive type")
+	}
+
+	// Read definite-length content
+	if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
+		return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
+	}
+	content := make([]byte, length)
+	if length > 0 {
+		_, err := io.ReadFull(reader, content)
+		if err != nil {
+			if err == io.EOF {
+				return nil, read, io.ErrUnexpectedEOF
+			}
+			return nil, read, err
+		}
+		read += length
+	}
+
+	if p.ClassType == ClassUniversal {
+		p.Data.Write(content)
+		p.ByteValue = content
+
+		switch p.Tag {
+		case TagEOC:
+		case TagBoolean:
+			val, _ := ParseInt64(content)
+
+			p.Value = val != 0
+		case TagInteger:
+			p.Value, _ = ParseInt64(content)
+		case TagBitString:
+		case TagOctetString:
+			// the actual string encoding is not known here
+			// (e.g. for LDAP content is already a UTF-8 encoded
+			// string).
Return the data without further processing + p.Value = DecodeString(content) + case TagNULL: + case TagObjectIdentifier: + case TagObjectDescriptor: + case TagExternal: + case TagRealFloat: + p.Value, err = ParseReal(content) + case TagEnumerated: + p.Value, _ = ParseInt64(content) + case TagEmbeddedPDV: + case TagUTF8String: + val := DecodeString(content) + if !utf8.Valid([]byte(val)) { + err = errors.New("invalid UTF-8 string") + } else { + p.Value = val + } + case TagRelativeOID: + case TagSequence: + case TagSet: + case TagNumericString: + case TagPrintableString: + val := DecodeString(content) + if err = isPrintableString(val); err == nil { + p.Value = val + } + case TagT61String: + case TagVideotexString: + case TagIA5String: + val := DecodeString(content) + for i, c := range val { + if c >= 0x7F { + err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c) + break + } + } + if err == nil { + p.Value = val + } + case TagUTCTime: + case TagGeneralizedTime: + p.Value, err = ParseGeneralizedTime(content) + case TagGraphicString: + case TagVisibleString: + case TagGeneralString: + case TagUniversalString: + case TagCharacterString: + case TagBMPString: + } + } else { + p.Data.Write(content) + } + + return p, read, err +} + +func isPrintableString(val string) error { + for i, c := range val { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c >= '0' && c <= '9': + default: + switch c { + case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ': + default: + return fmt.Errorf("invalid character in position %d", i) + } + } + } + return nil +} + +func (p *Packet) Bytes() []byte { + var out bytes.Buffer + + out.Write(encodeIdentifier(p.Identifier)) + out.Write(encodeLength(p.Data.Len())) + out.Write(p.Data.Bytes()) + + return out.Bytes() +} + +func (p *Packet) AppendChild(child *Packet) { + p.Data.Write(child.Bytes()) + p.Children = append(p.Children, child) +} + +func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := new(Packet) + + p.ClassType = classType + p.TagType = tagType + p.Tag = tag + p.Data = new(bytes.Buffer) + + p.Children = make([]*Packet, 0, 2) + + p.Value = value + p.Description = description + + if value != nil { + v := reflect.ValueOf(value) + + if classType == ClassUniversal { + switch tag { + case TagOctetString: + sv, ok := v.Interface().(string) + + if ok { + p.Data.Write([]byte(sv)) + } + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } else if classType == ClassContext { + switch tag { + case TagEnumerated: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + case TagEmbeddedPDV: + bv, ok := v.Interface().([]byte) + if ok { + p.Data.Write(bv) + } + } + } + } + return p +} + +func NewSequence(description string) *Packet { + return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description) +} + +func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { + intValue := int64(0) + + if value { + intValue = 1 + } + + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet. 
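+// RFC 4511 requires LDAP's BER encoding of BOOLEAN TRUE to be 0xFF rather
+// than 0x01, which is why this constructor uses 255 where NewBoolean uses 1.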
+func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet { + intValue := int64(0) + + if value { + intValue = 255 + } + + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write(encodeInteger(intValue)) + + return p +} + +func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + switch v := value.(type) { + case int: + p.Data.Write(encodeInteger(int64(v))) + case uint: + p.Data.Write(encodeInteger(int64(v))) + case int64: + p.Data.Write(encodeInteger(v)) + case uint64: + // TODO : check range or add encodeUInt... + p.Data.Write(encodeInteger(int64(v))) + case int32: + p.Data.Write(encodeInteger(int64(v))) + case uint32: + p.Data.Write(encodeInteger(int64(v))) + case int16: + p.Data.Write(encodeInteger(int64(v))) + case uint16: + p.Data.Write(encodeInteger(int64(v))) + case int8: + p.Data.Write(encodeInteger(int64(v))) + case uint8: + p.Data.Write(encodeInteger(int64(v))) + default: + // TODO : add support for big.Int ? + panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v)) + } + + return p +} + +func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + p.Value = value + p.Data.Write([]byte(value)) + + return p +} + +func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + var s string + if value.Nanosecond() != 0 { + s = value.Format(`20060102150405.000000000Z`) + } else { + s = value.Format(`20060102150405Z`) + } + p.Value = s + p.Data.Write([]byte(s)) + return p +} + +func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet { + p := Encode(classType, tagType, tag, nil, description) + + switch v := value.(type) { + case float64: + p.Data.Write(encodeFloat(v)) + case float32: + p.Data.Write(encodeFloat(float64(v))) + default: + panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v)) + } + return p +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go new file mode 100644 index 00000000..20b500f5 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go @@ -0,0 +1,25 @@ +package ber + +func encodeUnsignedInteger(i uint64) []byte { + n := uint64Length(i) + out := make([]byte, n) + + var j int + for ; n > 0; n-- { + out[j] = byte(i >> uint((n-1)*8)) + j++ + } + + return out +} + +func uint64Length(i uint64) (numBytes int) { + numBytes = 1 + + for i > 255 { + numBytes++ + i >>= 8 + } + + return +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go new file mode 100644 index 00000000..51215f06 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go @@ -0,0 +1,105 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "time" +) + +// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct. +var ErrInvalidTimeFormat = errors.New("invalid time format") + +var zeroTime = time.Time{} + +// ParseGeneralizedTime parses a string value and if it conforms to +// GeneralizedTime[^0] format, will return a time.Time for that value. 
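+// For example (illustrative values, not vendored text), "20230823025808Z"
+// parses to 2023-08-23 02:58:08 UTC, while "202308230258.5Z" parses to
+// 2023-08-23 02:58:30 UTC because the fraction applies to the minute.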
+//
+// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7
+func ParseGeneralizedTime(v []byte) (time.Time, error) {
+	var format string
+	var fract time.Duration
+
+	str := []byte(DecodeString(v))
+	tzIndex := bytes.IndexAny(str, "Z+-")
+	if tzIndex < 0 {
+		return zeroTime, ErrInvalidTimeFormat
+	}
+
+	dot := bytes.IndexAny(str, ".,")
+	switch dot {
+	case -1:
+		switch tzIndex {
+		case 10:
+			format = `2006010215Z`
+		case 12:
+			format = `200601021504Z`
+		case 14:
+			format = `20060102150405Z`
+		default:
+			return zeroTime, ErrInvalidTimeFormat
+		}
+
+	case 10, 12:
+		if tzIndex < dot {
+			return zeroTime, ErrInvalidTimeFormat
+		}
+		// a "," is also allowed, but would not be parsed by time.Parse():
+		str[dot] = '.'
+
+		// If <minute> is omitted, then <fraction> represents a fraction of an
+		// hour; otherwise, if <second> and <leap-second> are omitted, then
+		// <fraction> represents a fraction of a minute; otherwise, <fraction>
+		// represents a fraction of a second.
+
+		// parse as float from dot to timezone
+		f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64)
+		if err != nil {
+			return zeroTime, fmt.Errorf("failed to parse float: %s", err)
+		}
+		// ...and strip that part
+		str = append(str[:dot], str[tzIndex:]...)
+		tzIndex = dot
+
+		if dot == 10 {
+			fract = time.Duration(int64(f * float64(time.Hour)))
+			format = `2006010215Z`
+		} else {
+			fract = time.Duration(int64(f * float64(time.Minute)))
+			format = `200601021504Z`
+		}
+
+	case 14:
+		if tzIndex < dot {
+			return zeroTime, ErrInvalidTimeFormat
+		}
+		str[dot] = '.'
+		// no need for fractional seconds, time.Parse() handles that
+		format = `20060102150405Z`
+
+	default:
+		return zeroTime, ErrInvalidTimeFormat
+	}
+
+	l := len(str)
+	switch l - tzIndex {
+	case 1:
+		if str[l-1] != 'Z' {
+			return zeroTime, ErrInvalidTimeFormat
+		}
+	case 3:
+		format += `0700`
+		str = append(str, []byte("00")...)
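+	// a 5-character suffix such as "+0200" already matches the 0700
+	// format directly, so no padding is needed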
+	case 5:
+		format += `0700`
+	default:
+		return zeroTime, ErrInvalidTimeFormat
+	}
+
+	t, err := time.Parse(format, string(str))
+	if err != nil {
+		return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err)
+	}
+	return t.Add(fract), nil
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go
new file mode 100644
index 00000000..7dfa6b9a
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go
@@ -0,0 +1,38 @@
+package ber
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
+	var (
+		c, l int
+		i    Identifier
+	)
+
+	if i, c, err = readIdentifier(reader); err != nil {
+		return Identifier{}, 0, read, err
+	}
+	identifier = i
+	read += c
+
+	if l, c, err = readLength(reader); err != nil {
+		return Identifier{}, 0, read, err
+	}
+	length = l
+	read += c
+
+	// Validate length type with identifier (x.690, 8.1.3.2.a)
+	if length == LengthIndefinite && identifier.TagType == TypePrimitive {
+		return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
+	}
+
+	if length < LengthIndefinite {
+		err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
+		return
+	}
+
+	return identifier, length, read, nil
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
new file mode 100644
index 00000000..e8c43574
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
@@ -0,0 +1,112 @@
+package ber
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+func readIdentifier(reader io.Reader) (Identifier, int, error) {
+	identifier := Identifier{}
+	read := 0
+
+	// identifier byte
+	b, err := readByte(reader)
+	if err != nil {
+		if Debug {
+			fmt.Printf("error reading identifier byte: %v\n", err)
+		}
+		return Identifier{}, read, err
+	}
+	read++
+
+	identifier.ClassType = Class(b) & ClassBitmask
+	identifier.TagType = Type(b) & TypeBitmask
+
+	if tag := Tag(b) & TagBitmask; tag != HighTag {
+		// short-form tag
+		identifier.Tag = tag
+		return identifier, read, nil
+	}
+
+	// high-tag-number tag
+	tagBytes := 0
+	for {
+		b, err := readByte(reader)
+		if err != nil {
+			if Debug {
+				fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
+			}
+			return Identifier{}, read, err
+		}
+		tagBytes++
+		read++
+
+		// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
+		identifier.Tag <<= 7
+		identifier.Tag |= Tag(b) & HighTagValueBitmask
+
+		// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
+		if tagBytes == 1 && identifier.Tag == 0 {
+			return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
+		}
+		// Overflow of int64
+		// TODO: support big int tags?
+		if tagBytes > 9 {
+			return Identifier{}, read, errors.New("high-tag-number tag overflow")
+		}
+
+		// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
+		if Tag(b)&HighTagContinueBitmask == 0 {
+			break
+		}
+	}
+
+	return identifier, read, nil
+}
+
+func encodeIdentifier(identifier Identifier) []byte {
+	b := []byte{0x0}
+	b[0] |= byte(identifier.ClassType)
+	b[0] |= byte(identifier.TagType)
+
+	if identifier.Tag < HighTag {
+		// Short-form
+		b[0] |= byte(identifier.Tag)
+	} else {
+		// high-tag-number
+		b[0] |= byte(HighTag)
+
+		tag := identifier.Tag
+
+		b = append(b, encodeHighTag(tag)...)
+	}
+	return b
+}
+
+func encodeHighTag(tag Tag) []byte {
+	// set cap=4 to hopefully avoid additional allocations
+	b := make([]byte, 0, 4)
+	for tag != 0 {
+		// t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
+		t := tag & HighTagValueBitmask
+
+		// right shift tag 7 to remove what was just pulled off
+		tag >>= 7
+
+		// if b already has entries this entry needs a continuation bit (0x80)
+		if len(b) != 0 {
+			t |= HighTagContinueBitmask
+		}
+
+		b = append(b, byte(t))
+	}
+	// reverse
+	// since bits were pulled off 'tag' small to high the byte slice is in reverse order.
+	// example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
+	// this needs to be reversed into 0x81 0x7F
+	for i, j := 0, len(b)-1; i < len(b)/2; i++ {
+		b[i], b[j-i] = b[j-i], b[i]
+	}
+	return b
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
new file mode 100644
index 00000000..9cc195d0
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
@@ -0,0 +1,81 @@
+package ber
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+func readLength(reader io.Reader) (length int, read int, err error) {
+	// length byte
+	b, err := readByte(reader)
+	if err != nil {
+		if Debug {
+			fmt.Printf("error reading length byte: %v\n", err)
+		}
+		return 0, 0, err
+	}
+	read++
+
+	switch {
+	case b == 0xFF:
+		// Invalid 0xFF (x.690, 8.1.3.5.c)
+		return 0, read, errors.New("invalid length byte 0xff")
+
+	case b == LengthLongFormBitmask:
+		// Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
+		length = LengthIndefinite
+
+	case b&LengthLongFormBitmask == 0:
+		// Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
+		length = int(b) & LengthValueBitmask
+
+	case b&LengthLongFormBitmask != 0:
+		// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
+		lengthBytes := int(b) & LengthValueBitmask
+		// Protect against overflow
+		// TODO: support big int length?
+		if lengthBytes > 8 {
+			return 0, read, errors.New("long-form length overflow")
+		}
+
+		// Accumulate into a 64-bit variable
+		var length64 int64
+		for i := 0; i < lengthBytes; i++ {
+			b, err = readByte(reader)
+			if err != nil {
+				if Debug {
+					fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
+				}
+				return 0, read, err
+			}
+			read++
+
+			// x.690, 8.1.3.5
+			length64 <<= 8
+			length64 |= int64(b)
+		}
+
+		// Cast to a platform-specific integer
+		length = int(length64)
+		// Ensure we didn't overflow
+		if int64(length) != length64 {
+			return 0, read, errors.New("long-form length overflow")
+		}
+
+	default:
+		return 0, read, errors.New("invalid length byte")
+	}
+
+	return length, read, nil
+}
+
+func encodeLength(length int) []byte {
+	lengthBytes := encodeUnsignedInteger(uint64(length))
+	if length > 127 || len(lengthBytes) > 1 {
+		longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))}
+		longFormBytes = append(longFormBytes, lengthBytes...)
+ lengthBytes = longFormBytes + } + return lengthBytes +} diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go new file mode 100644 index 00000000..610a003a --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go @@ -0,0 +1,157 @@ +package ber + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" +) + +func encodeFloat(v float64) []byte { + switch { + case math.IsInf(v, 1): + return []byte{0x40} + case math.IsInf(v, -1): + return []byte{0x41} + case math.IsNaN(v): + return []byte{0x42} + case v == 0.0: + if math.Signbit(v) { + return []byte{0x43} + } + return []byte{} + default: + // we take the easy part ;-) + value := []byte(strconv.FormatFloat(v, 'G', -1, 64)) + var ret []byte + if bytes.Contains(value, []byte{'E'}) { + ret = []byte{0x03} + } else { + ret = []byte{0x02} + } + ret = append(ret, value...) + return ret + } +} + +func ParseReal(v []byte) (val float64, err error) { + if len(v) == 0 { + return 0.0, nil + } + switch { + case v[0]&0x80 == 0x80: + val, err = parseBinaryFloat(v) + case v[0]&0xC0 == 0x40: + val, err = parseSpecialFloat(v) + case v[0]&0xC0 == 0x0: + val, err = parseDecimalFloat(v) + default: + return 0.0, fmt.Errorf("invalid info block") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && !math.Signbit(val) { + return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block") + } + return val, nil +} + +func parseBinaryFloat(v []byte) (float64, error) { + var info byte + var buf []byte + + info, v = v[0], v[1:] + + var base int + switch info & 0x30 { + case 0x00: + base = 2 + case 0x10: + base = 8 + case 0x20: + base = 16 + case 0x30: + return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11") + } + + scale := uint((info & 0x0c) >> 2) + + var expLen int + switch info & 0x03 { + case 0x00: + expLen = 1 + case 0x01: + expLen = 2 + case 0x02: + expLen = 3 + case 0x03: + expLen = int(v[0]) + if expLen > 8 { + return 0.0, errors.New("too big value of exponent") + } + v = v[1:] + } + buf, v = v[:expLen], v[expLen:] + exponent, err := ParseInt64(buf) + if err != nil { + return 0.0, err + } + + if len(v) > 8 { + return 0.0, errors.New("too big value of mantissa") + } + + mant, err := ParseInt64(v) + if err != nil { + return 0.0, err + } + mantissa := mant << scale + + if info&0x40 == 0x40 { + mantissa = -mantissa + } + + return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil +} + +func parseDecimalFloat(v []byte) (val float64, err error) { + switch v[0] & 0x3F { + case 0x01: // NR form 1 + var iVal int64 + iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64) + val = float64(iVal) + case 0x02, 0x03: // NR form 2, 3 + val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64) + default: + err = errors.New("incorrect NR form") + } + if err != nil { + return 0.0, err + } + + if val == 0.0 && math.Signbit(val) { + return 0.0, errors.New("REAL value -0 must be encoded as a special value") + } + return val, nil +} + +func parseSpecialFloat(v []byte) (float64, error) { + if len(v) != 1 { + return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`) + } + switch v[0] { + case 0x40: + return math.Inf(1), nil + case 0x41: + return math.Inf(-1), nil + case 0x42: + return math.NaN(), nil + case 0x43: + return math.Copysign(0, -1), nil + } + return 0.0, errors.New(`encoding of "special value" not from ASN.1 standard`) +} diff 
--git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go new file mode 100644 index 00000000..14dc87d7 --- /dev/null +++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go @@ -0,0 +1,24 @@ +package ber + +import "io" + +func readByte(reader io.Reader) (byte, error) { + bytes := make([]byte, 1) + _, err := io.ReadFull(reader, bytes) + if err != nil { + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + return bytes[0], nil +} + +func isEOCPacket(p *Packet) bool { + return p != nil && + p.Tag == TagEOC && + p.ClassType == ClassUniversal && + p.TagType == TypePrimitive && + len(p.ByteValue) == 0 && + len(p.Children) == 0 +} diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore new file mode 100644 index 00000000..eb29ebae --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. 
+ +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md new file mode 100644 index 00000000..3305db0f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/github.com/go-jose/go-jose/v3/BUILD b/vendor/github.com/go-jose/go-jose/v3/BUILD new file mode 100644 index 00000000..63a8933e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go-jose", + srcs = [ + "asymmetric.go", + "crypter.go", + "doc.go", + "encoding.go", + "jwe.go", + "jwk.go", + "jws.go", + "opaque.go", + "shared.go", + "signing.go", + "symmetric.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/go-jose/go-jose/v3", + importpath = "github.com/go-jose/go-jose/v3", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/go-jose/go-jose/v3/cipher", + "//vendor/github.com/go-jose/go-jose/v3/json", + "//vendor/golang.org/x/crypto/pbkdf2", + ], +) diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md new file mode 100644 index 00000000..b63e1f8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md
new file mode 100644
index 00000000..b90c7e5c
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/README.md
@@ -0,0 +1,122 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose)
+[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose)
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. This includes support for the JSON Web
+Encryption, JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 2](https://gopkg.in/go-jose/go-jose.v2)
+([branch](https://github.com/go-jose/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
+
+    import "gopkg.in/go-jose/go-jose.v2"
+
+[Version 3](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/master),
+[doc](https://godoc.org/github.com/go-jose/go-jose)) is the in-development,
+unstable version (not yet released):
+
+    import "github.com/go-jose/go-jose/v3"
+
+All new feature development takes place on the `master` branch, which we are
+preparing to release as version 3 soon. Version 2 will continue to receive
+critical bug and security fixes. Note that starting with version 3 we are
+using Go modules for versioning instead of `gopkg.in` as before. Version 3
+will also require Go version 1.13 or higher.
+
+Version 1 (on the `v1` branch) is frozen and is no longer supported.
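+
+To give a flavor of the API, a minimal signing round-trip might look like the
+sketch below (hedged: not a complete program, and error handling is elided):
+
+    key, _ := rsa.GenerateKey(rand.Reader, 2048)
+    signer, _ := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
+    obj, _ := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+    compact, _ := obj.CompactSerialize()
+
+    obj, _ = jose.ParseSigned(compact)
+    payload, _ := obj.Verify(&key.PublicKey)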
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+ AES, HMAC                  | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
new file mode 100644
index 00000000..78abc326
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. 
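+// The public key must be a valid point on the curve; the IsOnCurve check
+// below rejects off-curve keys, which could otherwise enable invalid-curve
+// attacks.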
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch keyAlg {
+	case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+	default:
+		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+		return recipientKeyInfo{}, errors.New("invalid public key")
+	}
+
+	return recipientKeyInfo{
+		keyAlg: keyAlg,
+		keyEncrypter: &ecEncrypterVerifier{
+			publicKey: publicKey,
+		},
+	}, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch sigAlg {
+	case ES256, ES384, ES512:
+	default:
+		return recipientSigInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if privateKey == nil {
+		return recipientSigInfo{}, errors.New("invalid private key")
+	}
+
+	return recipientSigInfo{
+		sigAlg: sigAlg,
+		publicKey: staticPublicKey(&JSONWebKey{
+			Key: privateKey.Public(),
+		}),
+		signer: &ecDecrypterSigner{
+			privateKey: privateKey,
+		},
+	}, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+	encryptedKey, err := ctx.encrypt(cek, alg)
+	if err != nil {
+		return recipientInfo{}, err
+	}
+
+	return recipientInfo{
+		encryptedKey: encryptedKey,
+		header:       &rawHeader{},
+	}, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+	switch alg {
+	case RSA1_5:
+		return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+	case RSA_OAEP:
+		return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+	case RSA_OAEP_256:
+		return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+	}
+
+	return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+	return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+	// Note: The random reader on decrypt operations is only used for blinding,
+	// so stubbing is meaningless (hence the direct use of rand.Reader).
+	switch alg {
+	case RSA1_5:
+		defer func() {
+			// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+			// because of an index out of bounds error, which we want to ignore.
+			// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+			// only exists for preventing crashes with unpatched versions.
+			// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+			// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+			_ = recover()
+		}()
+
+		// Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
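+		// No encryptedKey is produced in this mode; the recipient re-derives
+		// the CEK from the ephemeral public key ("epk") that the key
+		// generator places in the headers.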
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
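+		// The derived key is used as the CEK directly, sized for the content
+		// encryption algorithm via generator.keySize().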
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
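+	// out is r||s: the fixed-width, big-endian signature encoding JWS
+	// requires for ECDSA (not an ASN.1/DER SEQUENCE).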
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/BUILD b/vendor/github.com/go-jose/go-jose/v3/cipher/BUILD new file mode 100644 index 00000000..7816d182 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/BUILD @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "cipher", + srcs = [ + "cbc_hmac.go", + "concat_kdf.go", + "ecdh_es.go", + "key_wrap.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/go-jose/go-jose/v3/cipher", + importpath = "github.com/go-jose/go-jose/v3/cipher", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go new file mode 100644 index 00000000..af029cec --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. 
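+// The key is split in half: the first half is used as the HMAC (integrity)
+// key and the second half as the block cipher (encryption) key. A hedged
+// usage sketch for A128CBC-HS256 (assumes a combined 32-byte key):
+//
+//	aead, err := NewCBCHMAC(key, aes.NewCipher)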
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. + ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. 
+// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. +func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go new file mode 100644 index 00000000..f62c3bdb --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. 
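+// The returned reader emits successive Hash(counter || z || info) blocks and
+// its Read never returns an error. A hedged usage sketch (derive 32 bytes
+// with SHA-256):
+//
+//	kdf := NewConcatKDF(crypto.SHA256, z, algID, apu, apv, supPubInfo, nil)
+//	out := make([]byte, 32)
+//	_, _ = kdf.Read(out)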
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go new file mode 100644 index 00000000..093c6467 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. 
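+	// (Example: on P-256 each coordinate is 32 bytes, so a z value that
+	// serializes to 31 bytes gets one leading zero byte prepended.)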
+	octSize := dSize(priv.Curve)
+	if len(zBytes) != octSize {
+		zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+	}
+
+	reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+	key := make([]byte, size)
+
+	// Read on the KDF will never fail
+	_, _ = reader.Read(key)
+
+	return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+	order := curve.Params().P
+	bitLen := order.BitLen()
+	size := bitLen / 8
+	if bitLen%8 != 0 {
+		size++
+	}
+	return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+	out := make([]byte, len(data)+4)
+	binary.BigEndian.PutUint32(out, uint32(len(data)))
+	copy(out[4:], data)
+	return out
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
new file mode 100644
index 00000000..b9effbca
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+	"crypto/cipher"
+	"crypto/subtle"
+	"encoding/binary"
+	"errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+	if len(cek)%8 != 0 {
+		return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+	}
+
+	n := len(cek) / 8
+	r := make([][]byte, n)
+
+	for i := range r {
+		r[i] = make([]byte, 8)
+		copy(r[i], cek[i*8:])
+	}
+
+	buffer := make([]byte, 16)
+	tBytes := make([]byte, 8)
+	copy(buffer, defaultIV)
+
+	for t := 0; t < 6*n; t++ {
+		copy(buffer[8:], r[t%n])
+
+		block.Encrypt(buffer, buffer)
+
+		binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+		for i := 0; i < 8; i++ {
+			buffer[i] ^= tBytes[i]
+		}
+		copy(r[t%n], buffer[8:])
+	}
+
+	out := make([]byte, (n+1)*8)
+	copy(out, buffer[:8])
+	for i := range r {
+		copy(out[(i+1)*8:], r[i])
+	}
+
+	return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
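+//
+// A hedged round-trip sketch (assumes kek is a 16-, 24-, or 32-byte AES key
+// and len(cek) is a multiple of 8):
+//
+//	block, _ := aes.NewCipher(kek)
+//	wrapped, _ := KeyWrap(block, cek)
+//	unwrapped, _ := KeyUnwrap(block, wrapped) // unwrapped equals cek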
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go new file mode 100644 index 00000000..6901137e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -0,0 +1,544 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "github.com/go-jose/go-jose/v3/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. 
Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. +type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = 
[]recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. 
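+//
+// A hedged caller-side sketch (identifiers are illustrative):
+//
+//	enc, _ := NewEncrypter(A128GCM, Recipient{Algorithm: RSA_OAEP, Key: &priv.PublicKey}, nil)
+//	jwe, _ := enc.Encrypt(plaintext)
+//	fmt.Println(jwe.FullSerialize())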
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. + obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. 
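EncryptWithAuthData, defined above, mixes caller-supplied additional authenticated data into the AEAD computation via computeAuthData without encrypting it. A short sketch under that reading; encryptWithAAD is a hypothetical helper name, and the note that the compact form cannot carry AAD follows from the compact JWE layout having no AAD slot.

```go
package example

import (
	jose "github.com/go-jose/go-jose/v3"
)

// encryptWithAAD is a hypothetical helper. The AAD is covered by the
// authentication tag (see computeAuthData in EncryptWithAuthData above)
// but is not itself encrypted; since the compact serialization has no
// field for it, the full JSON serialization is returned instead.
func encryptWithAAD(enc jose.Encrypter, payload, aad []byte) (string, error) {
	obj, err := enc.EncryptWithAuthData(payload, aad)
	if err != nil {
		return "", err
	}
	return obj.FullSerialize(), nil
}
```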
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. +func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. 
+ if comp := obj.protected.getCompression(); comp != "" { + plaintext, _ = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go new file mode 100644 index 00000000..71ec1c41 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go new file mode 100644 index 00000000..968a4249 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v3/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. 
+ // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. +type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. 
+ if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/BUILD b/vendor/github.com/go-jose/go-jose/v3/json/BUILD new file mode 100644 index 00000000..5b00b5de --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/BUILD @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "json", + srcs = [ + "decode.go", + "encode.go", + "indent.go", + "scanner.go", + "stream.go", + "tags.go", + ], + importmap = "go.resf.org/peridot/vendor/github.com/go-jose/go-jose/v3/json", + importpath = "github.com/go-jose/go-jose/v3/json", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/go-jose/go-jose/v3/json/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md new file mode 100644 index 00000000..86de5e55 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. 
+* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go new file mode 100644 index 00000000..4dbc4146 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. 
Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. 
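The decoder that follows enforces the two deviations announced in the README above: duplicate object members are rejected, and field matching is exact (the retained sentence in Unmarshal's doc comment about accepting a case-insensitive match is stale relative to the code in object(), which compares name bytes for equality only). A sketch of the observable behavior, assuming the fork is importable under the path declared in its BUILD file:

```go
package main

import (
	"fmt"

	josejson "github.com/go-jose/go-jose/v3/json"
)

type header struct {
	Alg string `json:"alg"`
}

func main() {
	var h header

	// Duplicate members are rejected outright by the keys map in object().
	err := josejson.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &h)
	fmt.Println(err) // json: duplicate key 'alg' in object

	// Matching is case-sensitive: "ALG" never populates the "alg" field,
	// unlike the standard library's case-insensitive fallback.
	_ = josejson.Unmarshal([]byte(`{"ALG":"RS256"}`), &h)
	fmt.Println(h.Alg == "") // true
}
```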
+func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. 
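For reference, the RFC 7159 grammar that isValidNumber walks by hand can be stated as one regular expression. The sketch below only illustrates which literal shapes are accepted or rejected; the package itself uses the allocation-free scan above rather than regexp.

```go
package main

import (
	"fmt"
	"regexp"
)

// jsonNumber mirrors the grammar implemented by isValidNumber:
// optional minus, an integer part with no leading zeros, an optional
// fraction, and an optional exponent with at least one digit.
var jsonNumber = regexp.MustCompile(`^-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][+-]?[0-9]+)?$`)

func main() {
	for _, s := range []string{"0", "10", "-1.5e3", "0e0", "01", "1.", ".5", "1e", "1e+"} {
		// The first four are accepted; the rest are rejected
		// (leading zeros, bare dots, dangling exponents).
		fmt.Printf("%q -> %v\n", s, jsonNumber.MatchString(s))
	}
}
```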
+func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. 
+ switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go new file mode 100644 index 00000000..ea0a1361 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML escaping
+// within <script> tags, so an alternative JSON encoding must be used.
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
+ // rawTag is the "script" in "</script>" that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "<p>" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
+
+// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "<!-- [CDATA[foo]] -->" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an <svg> element
+// can contain a <title> that is foreign-to-SVG but not foreign-to-
+// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
+// In practice, if using the tokenizer without caring whether MathML or SVG
+// CDATA is text or comments, such as tokenizing HTML to find all the anchor
+// text, it is acceptable to ignore this responsibility.
+func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
+ z.allowCDATA = allowCDATA
+}
+
+// NextIsNotRawText instructs the tokenizer that the next token should not be
+// considered as 'raw text'. Some elements, such as script and title elements,
+// normally require the next token after the opening tag to be 'raw text' that
+// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
+// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
+// an end tag token for "</title>". There are no distinct start tag or end tag
+// tokens for the "<b>" and "</b>".
+//
+// This tokenizer implementation will generally look for raw text at the right
+// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
+// raw text if in foreign content: <title> generally needs raw text, but a
+// <title> inside an <svg> does not. Another example is that a <textarea>
+// generally needs raw text, but a <textarea> is not allowed as an immediate
+// child of a <select>; in normal parsing, a <textarea> implies </select>, but
+// one cannot close the implicit element when parsing a <select>'s InnerHTML.
+// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
+// ness is difficult to do purely in the tokenizer, as opposed to the parser.
+// For strict compliance with the HTML5 tokenization algorithm, it is the
+// responsibility of the user of a tokenizer to call NextIsNotRawText as
+// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
+// responsibility for basic usage.
+//
+// Note that this 'raw text' concept is different from the one offered by the
+// Tokenizer.Raw method.
+func (z *Tokenizer) NextIsNotRawText() {
+ z.rawTag = ""
+}
+
+// Err returns the error associated with the most recent ErrorToken token.
+// This is typically io.EOF, meaning the end of tokenization.
+func (z *Tokenizer) Err() error {
+ if z.tt != ErrorToken {
+ return nil
+ }
+ return z.err
+}
+
+// readByte returns the next byte from the input stream, doing a buffered read
+// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
+// slice that holds all the bytes read so far for the current token.
+// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + if n, err := r.Read(b); n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. + default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". +func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + z.raw.end-- + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. 
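The buffering in readByte and the raw-text handling above are normally exercised through the package's public tokenizer loop. A sketch against the public golang.org/x/net/html API:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// The "</p>" inside the script does not end the script element;
	// readScript scans for a real "</script>" end tag.
	r := strings.NewReader(`<p>Hi <script>var s = "</p>";</script> there`)
	z := html.NewTokenizer(r)
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			fmt.Println("done:", z.Err()) // io.EOF on normal end of input
			return
		}
		fmt.Printf("%-10v %q\n", tt, z.Raw())
	}
}
```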
+func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. +func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c = z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != "SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto 
scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should only be temporary, not committed, as it + // exponentially affects the test running time. + + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + + var dashCount int + beginning := true + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 || beginning { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } else if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } else if c == '-' { + dashCount = 1 + beginning = false + continue + } + } + } + dashCount = 0 + beginning = false + } +} + +func (z *Tokenizer) calculateAbruptCommentDataEnd() int { + raw := z.Raw() + const prefixLen = len("<!--") + if len(raw) >= prefixLen { + raw = raw[prefixLen:] + if hasSuffix(raw, "--!") { + return z.raw.end - 3 + } else if hasSuffix(raw, "--") { + return z.raw.end - 2 + } else if hasSuffix(raw, "-") { + return z.raw.end - 1 + } + } + return z.raw.end +} + +func hasSuffix(b []byte, suffix string) bool { + if len(b) < len(suffix) { + return false + } + b = b[len(b)-len(suffix):] + for i := range b { + if b[i] != suffix[i] { + return false + } + } + return true +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. 
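+// +// (Editorial example.) The "DOCTYPE" keyword match is ASCII +// case-insensitive, and the token's data is everything between the keyword's +// trailing white space and the closing ">": both "<!DOCTYPE html>" and +// "<!doctype html>" yield a DoctypeToken whose Text is "html".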
+func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. + z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. + c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". 
The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. +func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return + case '=': + if z.pendingAttr[0].start+1 == z.raw.end { + // WHATWG 13.2.5.32, if we see an equals sign before the attribute name + // begins, we treat it as a character in the attribute name and continue. + continue + } + fallthrough + case '>': + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. +func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. 
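+ // (Editorial note: a '<' that is not followed by a letter, '/', '!' + // or '?' is literal text, so the reconsumed byte stays part of the + // accumulated text token.)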
+ z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. + z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +// +// The token stream's raw bytes partition the byte stream (up until an +// ErrorToken). There are no overlaps or gaps between two consecutive tokens' +// raw bytes. One implication is that the byte offset of the current token is +// the sum of the lengths of all previous tokens' raw bytes. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. +func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next.
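+// +// (Editorial sketch; names are illustrative, and fmt is not imported by this +// file.) A caller typically drains the attributes right after reading the +// name: +// +//	name, hasAttr := z.TagName() +//	for hasAttr { +//		var key, val []byte +//		key, val, hasAttr = z.TagAttr() +//		fmt.Printf("%s: %s=%q\n", name, key, val) +//	}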
+func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. +func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". +// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. +func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go new file mode 100644 index 00000000..44af1f1a --- /dev/null +++ b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitbucket provides constants for using OAuth2 to access Bitbucket. +package bitbucket + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Bitbucket's OAuth 2.0 endpoint. 
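+// +// A minimal sketch (editorial; the credentials are placeholders) of plugging +// it into an oauth2.Config: +// +//	conf := &oauth2.Config{ +//		ClientID:     "YOUR_CLIENT_ID", +//		ClientSecret: "YOUR_CLIENT_SECRET", +//		Endpoint:     bitbucket.Endpoint, +//	} +//	authURL := conf.AuthCodeURL("state-token")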
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://bitbucket.org/site/oauth2/authorize", + TokenURL: "https://bitbucket.org/site/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/github/github.go b/vendor/golang.org/x/oauth2/github/github.go new file mode 100644 index 00000000..f2978015 --- /dev/null +++ b/vendor/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github // import "golang.org/x/oauth2/github" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index be8f5a86..e064a1a2 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -113,6 +113,20 @@ const ( opObj = 'O' // .Obj() (Named, TypeParam) ) +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + // For returns the path to an object relative to its package, // or an error if the object is not accessible from the package's Scope. // @@ -145,24 +159,7 @@ const ( // .Type().Field(0) (field Var X) // // where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { - return newEncoderFor()(obj) -} - -// An encoder amortizes the cost of encoding the paths of multiple objects. -// Nonexported pending approval of proposal 58668. -type encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// pending approval of proposal 58668. -// -//go:linkname newEncoderFor -func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } - -func (enc *encoder) For(obj types.Object) (Path, error) { +func (enc *Encoder) For(obj types.Object) (Path, error) { pkg := obj.Pkg() // This table lists the cases of interest. @@ -341,7 +338,7 @@ func appendOpArg(path []byte, op byte, arg int) []byte { // This function is just an optimization that avoids the general scope walking // approach. You are expected to fall back to the general approach if this // function fails. -func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // Concrete methods can only be declared on package-scoped named types. For // that reason we can skip the expensive walk over the package scope: the // path will always be package -> named type -> method. 
We can trivially get @@ -730,8 +727,23 @@ func namedMethods(named *types.Named) []*types.Func { return methods } +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods +} + // scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *encoder) scopeNames(scope *types.Scope) []string { +func (enc *Encoder) scopeNames(scope *types.Scope) []string { m := enc.scopeNamesMemo if m == nil { m = make(map[*types.Scope][]string) @@ -744,19 +756,3 @@ func (enc *encoder) scopeNames(scope *types.Scope) []string { } return names } - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods - -} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d5055169..3c0afe72 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,12 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "reflect" "regexp" "runtime" "strconv" @@ -215,6 +217,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +243,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +257,85 @@ var DebugHangingGoCommands = false // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill.
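// (Editorial note.) The rewritten body below escalates gracefully: it waits // for ctx, sends os.Interrupt and waits a bounded five seconds on a timer, // then falls back to Kill and finally receives from resChan so that cmd.Wait // is always observed exactly once.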
-func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't want to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that that has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that that still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +343,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. - if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +363,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.)
+ timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. - if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d4..446c5846 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/google.golang.org/api/admin/directory/v1/admin-api.json b/vendor/google.golang.org/api/admin/directory/v1/admin-api.json new file mode 100644 index 00000000..b5a9b5fd --- /dev/null +++ b/vendor/google.golang.org/api/admin/directory/v1/admin-api.json @@ -0,0 +1,8403 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/admin.chrome.printers": { + "description": "See, add, edit, and permanently delete the printers that your organization can use with Chrome" + }, + "https://www.googleapis.com/auth/admin.chrome.printers.readonly": { + "description": "See the printers that your organization can use with Chrome" + }, + "https://www.googleapis.com/auth/admin.directory.customer": { + "description": "View and manage customer related information" + }, + "https://www.googleapis.com/auth/admin.directory.customer.readonly": { + "description": "View customer related information" + }, + "https://www.googleapis.com/auth/admin.directory.device.chromeos": { + "description": "View and manage your Chrome OS devices' metadata" + }, + "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly": { + "description": "View your Chrome OS devices' metadata" + }, + "https://www.googleapis.com/auth/admin.directory.device.mobile": { + "description": "View and manage your mobile devices' metadata" + }, + 
"https://www.googleapis.com/auth/admin.directory.device.mobile.action": { + "description": "Manage your mobile devices by performing administrative tasks" + }, + "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly": { + "description": "View your mobile devices' metadata" + }, + "https://www.googleapis.com/auth/admin.directory.domain": { + "description": "View and manage the provisioning of domains for your customers" + }, + "https://www.googleapis.com/auth/admin.directory.domain.readonly": { + "description": "View domains related to your customers" + }, + "https://www.googleapis.com/auth/admin.directory.group": { + "description": "View and manage the provisioning of groups on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.group.member": { + "description": "View and manage group subscriptions on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.group.member.readonly": { + "description": "View group subscriptions on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.group.readonly": { + "description": "View groups on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.orgunit": { + "description": "View and manage organization units on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.orgunit.readonly": { + "description": "View organization units on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.resource.calendar": { + "description": "View and manage the provisioning of calendar resources on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly": { + "description": "View calendar resources on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.rolemanagement": { + "description": "Manage delegated admin roles for your domain" + }, + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly": { + "description": "View delegated admin roles for your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user": { + "description": "View and manage the provisioning of users on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user.alias": { + "description": "View and manage user aliases on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user.alias.readonly": { + "description": "View user aliases on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user.readonly": { + "description": "See info about users on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.user.security": { + "description": "Manage data access permissions for users on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.userschema": { + "description": "View and manage the provisioning of user schemas on your domain" + }, + "https://www.googleapis.com/auth/admin.directory.userschema.readonly": { + "description": "View user schemas on your domain" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." + } + } + } + }, + "basePath": "", + "baseUrl": "https://admin.googleapis.com/", + "batchPath": "batch", + "canonicalName": "directory", + "description": "Admin SDK lets administrators of enterprise domains to view and manage resources like user, groups etc. 
It also provides audit and usage reports of domain.", + "discoveryVersion": "v1", + "documentationLink": "https://developers.google.com/admin-sdk/", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "admin:directory_v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://admin.mtls.googleapis.com/", + "name": "admin", + "ownerDomain": "google.com", + "ownerName": "Google", + "packagePath": "admin", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "asps": { + "methods": { + "delete": { + "description": "Deletes an ASP issued by a user.", + "flatPath": "admin/directory/v1/users/{userKey}/asps/{codeId}", + "httpMethod": "DELETE", + "id": "directory.asps.delete", + "parameterOrder": [ + "userKey", + "codeId" + ], + "parameters": { + "codeId": { + "description": "The unique ID of the ASP to be deleted.", + "format": "int32", + "location": "path", + "required": true, + "type": "integer" + }, + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/asps/{codeId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "get": { + "description": "Gets information about an ASP issued by a user.", + "flatPath": "admin/directory/v1/users/{userKey}/asps/{codeId}", + "httpMethod": "GET", + "id": "directory.asps.get", + "parameterOrder": [ + "userKey", + "codeId" + ], + "parameters": { + "codeId": { + "description": "The unique ID of the ASP.", + "format": "int32", + "location": "path", + "required": true, + "type": "integer" + }, + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/asps/{codeId}", + "response": { + "$ref": "Asp" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "list": { + "description": "Lists the ASPs issued by a user.", + "flatPath": "admin/directory/v1/users/{userKey}/asps", + "httpMethod": "GET", + "id": "directory.asps.list", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/asps", + "response": { + "$ref": "Asps" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "description": "Stops watching resources through this channel.", + "flatPath": "admin/directory_v1/channels/stop", + "httpMethod": "POST", + "id": "admin.channels.stop", + "parameterOrder": [], + "parameters": {}, + "path": "admin/directory_v1/channels/stop", + "request": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias", + "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "chromeosdevices": { + "methods": { + "action": { + "description": "Takes an action that affects a Chrome OS Device. This includes deprovisioning, disabling, and re-enabling devices. *Warning:* * Deprovisioning a device will stop device policy syncing and remove device-level printers. After a device is deprovisioned, it must be wiped before it can be re-enrolled. * Lost or stolen devices should use the disable action. * Re-enabling a disabled device will consume a device license. If you do not have sufficient licenses available when completing the re-enable action, you will receive an error. 
For more information about deprovisioning and disabling devices, visit the [help center](https://support.google.com/chrome/a/answer/3523633).", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{resourceId}/action", + "httpMethod": "POST", + "id": "directory.chromeosdevices.action", + "parameterOrder": [ + "customerId", + "resourceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "resourceId": { + "description": "The unique ID of the device. The `resourceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/directory/v1/reference/chromeosdevices/list) method.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{resourceId}/action", + "request": { + "$ref": "ChromeOsDeviceAction" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos" + ] + }, + "get": { + "description": "Retrieves a Chrome OS device's properties.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "httpMethod": "GET", + "id": "directory.chromeosdevices.get", + "parameterOrder": [ + "customerId", + "deviceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "deviceId": { + "description": "The unique ID of the device. The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/directory/v1/reference/chromeosdevices/list) method.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Determines whether the response contains the full list of properties or only a subset.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "response": { + "$ref": "ChromeOsDevice" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos", + "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + ] + }, + "list": { + "description": "Retrieves a paginated list of Chrome OS devices within an account.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos", + "httpMethod": "GET", + "id": "directory.chromeosdevices.list", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "includeChildOrgunits": { + "description": "Return devices from all child orgunits, as well as the specified org unit. If this is set to true, 'orgUnitPath' must be provided.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "100", + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Device property to use for sorting results.", + "enum": [ + "annotatedLocation", + "annotatedUser", + "lastSync", + "notes", + "serialNumber", + "status" + ], + "enumDescriptions": [ + "Chrome device location as annotated by the administrator.", + "Chromebook user as annotated by administrator.", + "The date and time the Chrome device was last synchronized with the policy settings in the Admin console.", + "Chrome device notes as annotated by the administrator.", + "The Chrome device serial number entered when the device was enabled.", + "Chrome device status. For more information, see the \u003ca [chromeosdevices](/admin-sdk/directory/v1/reference/chromeosdevices.html)." + ], + "location": "query", + "type": "string" + }, + "orgUnitPath": { + "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "The `pageToken` query parameter is used to request the next page of query results. The follow-on request's `pageToken` query parameter is the `nextPageToken` from your previous response.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Restrict information returned to a set of selected fields.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + }, + "query": { + "description": "Search string in the format given at https://developers.google.com/admin-sdk/directory/v1/list-query-operators", + "location": "query", + "type": "string" + }, + "sortOrder": { + "description": "Whether to return results in ascending or descending order. Must be used with the `orderBy` parameter.", + "enum": [ + "ASCENDING", + "DESCENDING" + ], + "enumDescriptions": [ + "Ascending order.", + "Descending order." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos", + "response": { + "$ref": "ChromeOsDevices" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos", + "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + ] + }, + "moveDevicesToOu": { + "description": "Moves or inserts multiple Chrome OS devices to an organizational unit. You can move up to 50 devices at once.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/moveDevicesToOu", + "httpMethod": "POST", + "id": "directory.chromeosdevices.moveDevicesToOu", + "parameterOrder": [ + "customerId", + "orgUnitPath" + ], + "parameters": { + "customerId": { + "description": "Immutable. 
ID of the Google Workspace account", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "description": "Full path of the target organizational unit or its ID", + "location": "query", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/moveDevicesToOu", + "request": { + "$ref": "ChromeOsMoveDevicesToOu" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos" + ] + }, + "patch": { + "description": "Updates a device's updatable properties, such as `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or `annotatedAssetId`. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "httpMethod": "PATCH", + "id": "directory.chromeosdevices.patch", + "parameterOrder": [ + "customerId", + "deviceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "deviceId": { + "description": "The unique ID of the device. The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/v1/reference/chromeosdevices/list) method.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Restrict information returned to a set of selected fields.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "request": { + "$ref": "ChromeOsDevice" + }, + "response": { + "$ref": "ChromeOsDevice" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos" + ] + }, + "update": { + "description": "Updates a device's updatable properties, such as `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or `annotatedAssetId`.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "httpMethod": "PUT", + "id": "directory.chromeosdevices.update", + "parameterOrder": [ + "customerId", + "deviceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "deviceId": { + "description": "The unique ID of the device. 
The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/v1/reference/chromeosdevices/list) method.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Restrict information returned to a set of selected fields.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + "request": { + "$ref": "ChromeOsDevice" + }, + "response": { + "$ref": "ChromeOsDevice" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos" + ] + } + } + }, + "customer": { + "resources": { + "devices": { + "resources": { + "chromeos": { + "methods": { + "issueCommand": { + "description": "Issues a command for the device to execute.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}:issueCommand", + "httpMethod": "POST", + "id": "admin.customer.devices.chromeos.issueCommand", + "parameterOrder": [ + "customerId", + "deviceId" + ], + "parameters": { + "customerId": { + "description": "Immutable. ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "deviceId": { + "description": "Immutable. ID of Chrome OS Device.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}:issueCommand", + "request": { + "$ref": "DirectoryChromeosdevicesIssueCommandRequest" + }, + "response": { + "$ref": "DirectoryChromeosdevicesIssueCommandResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos" + ] + } + }, + "resources": { + "commands": { + "methods": { + "get": { + "description": "Gets command data a specific command issued to the device.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}/commands/{commandId}", + "httpMethod": "GET", + "id": "admin.customer.devices.chromeos.commands.get", + "parameterOrder": [ + "customerId", + "deviceId", + "commandId" + ], + "parameters": { + "commandId": { + "description": "Immutable. ID of Chrome OS Device Command.", + "format": "int64", + "location": "path", + "required": true, + "type": "string" + }, + "customerId": { + "description": "Immutable. ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "deviceId": { + "description": "Immutable. 
ID of Chrome OS Device.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}/commands/{commandId}", + "response": { + "$ref": "DirectoryChromeosdevicesCommand" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.chromeos", + "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + ] + } + } + } + } + } + } + } + } + }, + "customers": { + "methods": { + "get": { + "description": "Retrieves a customer.", + "flatPath": "admin/directory/v1/customers/{customerKey}", + "httpMethod": "GET", + "id": "directory.customers.get", + "parameterOrder": [ + "customerKey" + ], + "parameters": { + "customerKey": { + "description": "Id of the customer to be retrieved", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customers/{customerKey}", + "response": { + "$ref": "Customer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.customer", + "https://www.googleapis.com/auth/admin.directory.customer.readonly" + ] + }, + "patch": { + "description": "Patches a customer.", + "flatPath": "admin/directory/v1/customers/{customerKey}", + "httpMethod": "PATCH", + "id": "directory.customers.patch", + "parameterOrder": [ + "customerKey" + ], + "parameters": { + "customerKey": { + "description": "Id of the customer to be updated", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customers/{customerKey}", + "request": { + "$ref": "Customer" + }, + "response": { + "$ref": "Customer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.customer" + ] + }, + "update": { + "description": "Updates a customer.", + "flatPath": "admin/directory/v1/customers/{customerKey}", + "httpMethod": "PUT", + "id": "directory.customers.update", + "parameterOrder": [ + "customerKey" + ], + "parameters": { + "customerKey": { + "description": "Id of the customer to be updated", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customers/{customerKey}", + "request": { + "$ref": "Customer" + }, + "response": { + "$ref": "Customer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.customer" + ] + } + }, + "resources": { + "chrome": { + "resources": { + "printServers": { + "methods": { + "batchCreatePrintServers": { + "description": "Creates multiple print servers.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers:batchCreatePrintServers", + "httpMethod": "POST", + "id": "admin.customers.chrome.printServers.batchCreatePrintServers", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. 
Format: `customers/{id}`", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printServers:batchCreatePrintServers", + "request": { + "$ref": "BatchCreatePrintServersRequest" + }, + "response": { + "$ref": "BatchCreatePrintServersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "batchDeletePrintServers": { + "description": "Deletes multiple print servers.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers:batchDeletePrintServers", + "httpMethod": "POST", + "id": "admin.customers.chrome.printServers.batchDeletePrintServers", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{customer.id}`", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printServers:batchDeletePrintServers", + "request": { + "$ref": "BatchDeletePrintServersRequest" + }, + "response": { + "$ref": "BatchDeletePrintServersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "create": { + "description": "Creates a print server.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers", + "httpMethod": "POST", + "id": "admin.customers.chrome.printServers.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printServers", + "request": { + "$ref": "PrintServer" + }, + "response": { + "$ref": "PrintServer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "delete": { + "description": "Deletes a print server.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + "httpMethod": "DELETE", + "id": "admin.customers.chrome.printServers.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the print server to be deleted. Format: `customers/{customer.id}/chrome/printServers/{print_server.id}`", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "get": { + "description": "Returns a print server's configuration.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + "httpMethod": "GET", + "id": "admin.customers.chrome.printServers.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the print server to retrieve. 
Format: `customers/{customer.id}/chrome/printServers/{print_server.id}`", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "response": { + "$ref": "PrintServer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + ] + }, + "list": { + "description": "Lists print server configurations.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers", + "httpMethod": "GET", + "id": "admin.customers.chrome.printServers.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Search query in [Common Expression Language syntax](https://github.com/google/cel-spec). Supported filters are `display_name`, `description`, and `uri`. Example: `printServer.displayName=='marketing-queue'`.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Sort order for results. Supported values are `display_name`, `description`, or `create_time`. Default order is ascending, but descending order can be returned by appending \"desc\" to the `order_by` field. For instance, `orderBy=='description desc'` returns the print servers sorted by description in descending order.", + "location": "query", + "type": "string" + }, + "orgUnitId": { + "description": "If `org_unit_id` is present in the request, only print servers owned or inherited by the organizational unit (OU) are returned. If the `PrintServer` resource's `org_unit_id` matches the one in the request, the OU owns the server. If `org_unit_id` is not specified in the request, all print servers are returned or filtered against.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of objects to return (default `100`, max `100`). The service might return fewer than this value.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A generated token to paginate results (the `next_page_token` from a previous call).", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printServers", + "response": { + "$ref": "ListPrintServersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + ] + }, + "patch": { + "description": "Updates a print server's configuration.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + "httpMethod": "PATCH", + "id": "admin.customers.chrome.printServers.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Immutable. Resource name of the print server. Leave empty when creating. Format: `customers/{customer.id}/chrome/printServers/{print_server.id}`", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to update. Some fields are read-only and cannot be updated. 
Values for unspecified fields are patched.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "request": { + "$ref": "PrintServer" + }, + "response": { + "$ref": "PrintServer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + } + } + }, + "printers": { + "methods": { + "batchCreatePrinters": { + "description": "Creates printers under a given Organization Unit.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:batchCreatePrinters", + "httpMethod": "POST", + "id": "admin.customers.chrome.printers.batchCreatePrinters", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the customer. Format: customers/{customer_id}", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printers:batchCreatePrinters", + "request": { + "$ref": "BatchCreatePrintersRequest" + }, + "response": { + "$ref": "BatchCreatePrintersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "batchDeletePrinters": { + "description": "Deletes printers in batch.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:batchDeletePrinters", + "httpMethod": "POST", + "id": "admin.customers.chrome.printers.batchDeletePrinters", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the customer. Format: customers/{customer_id}", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printers:batchDeletePrinters", + "request": { + "$ref": "BatchDeletePrintersRequest" + }, + "response": { + "$ref": "BatchDeletePrintersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "create": { + "description": "Creates a printer under a given Organization Unit.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers", + "httpMethod": "POST", + "id": "admin.customers.chrome.printers.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the customer. Format: customers/{customer_id}", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printers", + "request": { + "$ref": "Printer" + }, + "response": { + "$ref": "Printer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "delete": { + "description": "Deletes a `Printer`.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}", + "httpMethod": "DELETE", + "id": "admin.customers.chrome.printers.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the printer to be deleted. 
Format: customers/{customer_id}/chrome/printers/{printer_id}", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + }, + "get": { + "description": "Returns a `Printer` resource (printer's config).", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}", + "httpMethod": "GET", + "id": "admin.customers.chrome.printers.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the printer to retrieve. Format: customers/{customer_id}/chrome/printers/{printer_id}", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "response": { + "$ref": "Printer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + ] + }, + "list": { + "description": "Lists printer configs.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers", + "httpMethod": "GET", + "id": "admin.customers.chrome.printers.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Search query. Search syntax is shared between this API and Admin Console printers pages.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "The order to sort results by. Must be one of display_name, description, make_and_model, or create_time. Default order is ascending, but descending order can be returned by appending \"desc\" to the order_by field. For instance, \"description desc\" will return the printers sorted by description in descending order.", + "location": "query", + "type": "string" + }, + "orgUnitId": { + "description": "Organization Unit that we want to list the printers for. When org_unit is not present in the request then all printers of the customer are returned (or filtered). When org_unit is present in the request then only printers available to this OU will be returned (owned or inherited). You may see if printer is owned or inherited for this OU by looking at Printer.org_unit_id.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of objects to return. The service may return fewer than this value.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The name of the customer who owns this collection of printers. 
Format: customers/{customer_id}", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printers", + "response": { + "$ref": "ListPrintersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + ] + }, + "listPrinterModels": { + "description": "Lists the supported printer models.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:listPrinterModels", + "httpMethod": "GET", + "id": "admin.customers.chrome.printers.listPrinterModels", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Filter to list only models by a given manufacturer in format: \"manufacturer:Brother\". Search syntax is shared between this API and Admin Console printers pages.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of objects to return. The service may return fewer than this value.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous call.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The name of the customer who owns this collection of printers. Format: customers/{customer_id}", + "location": "path", + "pattern": "^customers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/{+parent}/chrome/printers:listPrinterModels", + "response": { + "$ref": "ListPrinterModelsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + ] + }, + "patch": { + "description": "Updates a `Printer` resource.", + "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}", + "httpMethod": "PATCH", + "id": "admin.customers.chrome.printers.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "clearMask": { + "description": "The list of fields to be cleared. Note: some of the fields are read-only and cannot be updated. Values for unspecified fields are patched.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, + "name": { + "description": "The resource name of the Printer object, in the format customers/{customer-id}/printers/{printer-id} (During printer creation leave empty)", + "location": "path", + "pattern": "^customers/[^/]+/chrome/printers/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to be updated. Note: some of the fields are read-only and cannot be updated. 
Values for unspecified fields are patched.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/{+name}", + "request": { + "$ref": "Printer" + }, + "response": { + "$ref": "Printer" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.chrome.printers" + ] + } + } + } + } + } + } + }, + "domainAliases": { + "methods": { + "delete": { + "description": "Deletes a domain alias of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + "httpMethod": "DELETE", + "id": "directory.domainAliases.delete", + "parameterOrder": [ + "customer", + "domainAliasName" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "domainAliasName": { + "description": "Name of domain alias to be deleted.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain" + ] + }, + "get": { + "description": "Retrieves a domain alias of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + "httpMethod": "GET", + "id": "directory.domainAliases.get", + "parameterOrder": [ + "customer", + "domainAliasName" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "domainAliasName": { + "description": "Name of domain alias to be retrieved.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + "response": { + "$ref": "DomainAlias" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain", + "https://www.googleapis.com/auth/admin.directory.domain.readonly" + ] + }, + "insert": { + "description": "Inserts a domain alias of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domainaliases", + "httpMethod": "POST", + "id": "directory.domainAliases.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domainaliases", + "request": { + "$ref": "DomainAlias" + }, + "response": { + "$ref": "DomainAlias" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain" + ] + }, + "list": { + "description": "Lists the domain aliases of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domainaliases", + "httpMethod": "GET", + "id": "directory.domainAliases.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. 
You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "parentDomainName": { + "description": "Name of the parent domain for which domain aliases are to be fetched.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domainaliases", + "response": { + "$ref": "DomainAliases" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain", + "https://www.googleapis.com/auth/admin.directory.domain.readonly" + ] + } + } + }, + "domains": { + "methods": { + "delete": { + "description": "Deletes a domain of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domains/{domainName}", + "httpMethod": "DELETE", + "id": "directory.domains.delete", + "parameterOrder": [ + "customer", + "domainName" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "domainName": { + "description": "Name of domain to be deleted", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domains/{domainName}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain" + ] + }, + "get": { + "description": "Retrieves a domain of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domains/{domainName}", + "httpMethod": "GET", + "id": "directory.domains.get", + "parameterOrder": [ + "customer", + "domainName" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "domainName": { + "description": "Name of domain to be retrieved", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domains/{domainName}", + "response": { + "$ref": "Domains" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain", + "https://www.googleapis.com/auth/admin.directory.domain.readonly" + ] + }, + "insert": { + "description": "Inserts a domain of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domains", + "httpMethod": "POST", + "id": "directory.domains.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domains", + "request": { + "$ref": "Domains" + }, + "response": { + "$ref": "Domains" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain" + ] + }, + "list": { + "description": "Lists the domains of the customer.", + "flatPath": "admin/directory/v1/customer/{customer}/domains", + "httpMethod": "GET", + "id": "directory.domains.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/domains", + "response": { + "$ref": "Domains2" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.domain", + "https://www.googleapis.com/auth/admin.directory.domain.readonly" + ] + } + } + }, + "groups": { + "methods": { + "delete": { + "description": "Deletes a group.", + "flatPath": "admin/directory/v1/groups/{groupKey}", + "httpMethod": "DELETE", + "id": "directory.groups.delete", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + }, + "get": { + "description": "Retrieves a group's properties.", + "flatPath": "admin/directory/v1/groups/{groupKey}", + "httpMethod": "GET", + "id": "directory.groups.get", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. 
The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}", + "response": { + "$ref": "Group" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + }, + "insert": { + "description": "Creates a group.", + "flatPath": "admin/directory/v1/groups", + "httpMethod": "POST", + "id": "directory.groups.insert", + "parameterOrder": [], + "parameters": {}, + "path": "admin/directory/v1/groups", + "request": { + "$ref": "Group" + }, + "response": { + "$ref": "Group" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + }, + "list": { + "description": "Retrieves all groups of a domain or of a user given a userKey (paginated).", + "flatPath": "admin/directory/v1/groups", + "httpMethod": "GET", + "id": "directory.groups.list", + "parameterOrder": [], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "query", + "type": "string" + }, + "domain": { + "description": "The domain name. Use this field to get groups from only one domain. To return all domains for a customer account, use the `customer` query parameter instead.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "200", + "description": "Maximum number of results to return. Max allowed value is 200.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Column to use for sorting results", + "enum": [ + "email" + ], + "enumDescriptions": [ + "Email of the group." + ], + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Token to specify next page in the list", + "location": "query", + "type": "string" + }, + "query": { + "description": "Query string search. Should be of the form \"\". Complete documentation is at https://developers.google.com/admin-sdk/directory/v1/guides/search-groups", + "location": "query", + "type": "string" + }, + "sortOrder": { + "description": "Whether to return results in ascending or descending order. Only of use when orderBy is also used", + "enum": [ + "ASCENDING", + "DESCENDING" + ], + "enumDescriptions": [ + "Ascending order.", + "Descending order." + ], + "location": "query", + "type": "string" + }, + "userKey": { + "description": "Email or immutable ID of a user. If provided, only the groups of which that user is a member are listed. If it's an ID, it should match the ID of the user object.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/groups", + "response": { + "$ref": "Groups" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + }, + "patch": { + "description": "Updates a group's properties. 
This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + "flatPath": "admin/directory/v1/groups/{groupKey}", + "httpMethod": "PATCH", + "id": "directory.groups.patch", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}", + "request": { + "$ref": "Group" + }, + "response": { + "$ref": "Group" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + }, + "update": { + "description": "Updates a group's properties.", + "flatPath": "admin/directory/v1/groups/{groupKey}", + "httpMethod": "PUT", + "id": "directory.groups.update", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}", + "request": { + "$ref": "Group" + }, + "response": { + "$ref": "Group" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + } + }, + "resources": { + "aliases": { + "methods": { + "delete": { + "description": "Removes an alias.", + "flatPath": "admin/directory/v1/groups/{groupKey}/aliases/{alias}", + "httpMethod": "DELETE", + "id": "directory.groups.aliases.delete", + "parameterOrder": [ + "groupKey", + "alias" + ], + "parameters": { + "alias": { + "description": "The alias to be removed", + "location": "path", + "required": true, + "type": "string" + }, + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/aliases/{alias}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + }, + "insert": { + "description": "Adds an alias for the group.", + "flatPath": "admin/directory/v1/groups/{groupKey}/aliases", + "httpMethod": "POST", + "id": "directory.groups.aliases.insert", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/aliases", + "request": { + "$ref": "Alias" + }, + "response": { + "$ref": "Alias" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group" + ] + }, + "list": { + "description": "Lists all aliases for a group.", + "flatPath": "admin/directory/v1/groups/{groupKey}/aliases", + "httpMethod": "GET", + "id": "directory.groups.aliases.list", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. 
The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/aliases", + "response": { + "$ref": "Aliases" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + } + } + } + } + }, + "members": { + "methods": { + "delete": { + "description": "Removes a member from a group.", + "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "httpMethod": "DELETE", + "id": "directory.members.delete", + "parameterOrder": [ + "groupKey", + "memberKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "memberKey": { + "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member" + ] + }, + "get": { + "description": "Retrieves a group member's properties.", + "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "httpMethod": "GET", + "id": "directory.members.get", + "parameterOrder": [ + "groupKey", + "memberKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "memberKey": { + "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "response": { + "$ref": "Member" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member", + "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + }, + "hasMember": { + "description": "Checks whether the given user is a member of the group. Membership can be direct or nested, but if nested, the `memberKey` and `groupKey` must be entities in the same domain or an `Invalid input` error is returned. To check for nested memberships that include entities outside of the group's domain, use the [`checkTransitiveMembership()`](https://cloud.google.com/identity/docs/reference/rest/v1/groups.memberships/checkTransitiveMembership) method in the Cloud Identity Groups API.", + "flatPath": "admin/directory/v1/groups/{groupKey}/hasMember/{memberKey}", + "httpMethod": "GET", + "id": "directory.members.hasMember", + "parameterOrder": [ + "groupKey", + "memberKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. 
The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "memberKey": { + "description": "Identifies the user member in the API request. The value can be the user's primary email address, alias, or unique ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/hasMember/{memberKey}", + "response": { + "$ref": "MembersHasMember" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member", + "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + }, + "insert": { + "description": "Adds a user to the specified group.", + "flatPath": "admin/directory/v1/groups/{groupKey}/members", + "httpMethod": "POST", + "id": "directory.members.insert", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members", + "request": { + "$ref": "Member" + }, + "response": { + "$ref": "Member" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member" + ] + }, + "list": { + "description": "Retrieves a paginated list of all members in a group. This method times out after 60 minutes. For more information, see [Troubleshoot error codes](https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes).", + "flatPath": "admin/directory/v1/groups/{groupKey}/members", + "httpMethod": "GET", + "id": "directory.members.list", + "parameterOrder": [ + "groupKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "includeDerivedMembership": { + "description": "Whether to list indirect memberships. Default: false.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "200", + "description": "Maximum number of results to return. Max allowed value is 200.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify next page in the list.", + "location": "query", + "type": "string" + }, + "roles": { + "description": "The `roles` query parameter allows you to retrieve group members by role. Allowed values are `OWNER`, `MANAGER`, and `MEMBER`.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members", + "response": { + "$ref": "Members" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member", + "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly" + ] + }, + "patch": { + "description": "Updates the membership properties of a user in the specified group. 
This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "httpMethod": "PATCH", + "id": "directory.members.patch", + "parameterOrder": [ + "groupKey", + "memberKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "memberKey": { + "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "request": { + "$ref": "Member" + }, + "response": { + "$ref": "Member" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member" + ] + }, + "update": { + "description": "Updates the membership of a user in the specified group.", + "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "httpMethod": "PUT", + "id": "directory.members.update", + "parameterOrder": [ + "groupKey", + "memberKey" + ], + "parameters": { + "groupKey": { + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + "location": "path", + "required": true, + "type": "string" + }, + "memberKey": { + "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + "request": { + "$ref": "Member" + }, + "response": { + "$ref": "Member" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member" + ] + } + } + }, + "mobiledevices": { + "methods": { + "action": { + "description": "Takes an action that affects a mobile device. For example, remotely wiping a device.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}/action", + "httpMethod": "POST", + "id": "directory.mobiledevices.action", + "parameterOrder": [ + "customerId", + "resourceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "resourceId": { + "description": "The unique ID the API service uses to identify the mobile device.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}/action", + "request": { + "$ref": "MobileDeviceAction" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.mobile", + "https://www.googleapis.com/auth/admin.directory.device.mobile.action" + ] + }, + "delete": { + "description": "Removes a mobile device.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + "httpMethod": "DELETE", + "id": "directory.mobiledevices.delete", + "parameterOrder": [ + "customerId", + "resourceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "resourceId": { + "description": "The unique ID the API service uses to identify the mobile device.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.mobile" + ] + }, + "get": { + "description": "Retrieves a mobile device's properties.", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + "httpMethod": "GET", + "id": "directory.mobiledevices.get", + "parameterOrder": [ + "customerId", + "resourceId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Restrict information returned to a set of selected fields.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, model, status, and type)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + }, + "resourceId": { + "description": "The unique ID the API service uses to identify the mobile device.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + "response": { + "$ref": "MobileDevice" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.mobile", + "https://www.googleapis.com/auth/admin.directory.device.mobile.action", + "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly" + ] + }, + "list": { + "description": "Retrieves a paginated list of all user-owned mobile devices for an account. To retrieve a list that includes company-owned devices, use the Cloud Identity [Devices API](https://cloud.google.com/identity/docs/concepts/overview-devices) instead. This method times out after 60 minutes. 
For more information, see [Troubleshoot error codes](https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes).", + "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile", + "httpMethod": "GET", + "id": "directory.mobiledevices.list", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "default": "100", + "description": "Maximum number of results to return. Max allowed value is 100.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Device property to use for sorting results.", + "enum": [ + "deviceId", + "email", + "lastSync", + "model", + "name", + "os", + "status", + "type" + ], + "enumDescriptions": [ + "The serial number for a Google Sync mobile device. For Android devices, this is a software generated unique identifier.", + "The device owner's email address.", + "Last policy settings sync date time of the device.", + "The mobile device's model.", + "The device owner's user name.", + "The device's operating system.", + "The device status.", + "Type of the device." + ], + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Token to specify next page in the list", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Restrict information returned to a set of selected fields.", + "enum": [ + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Includes only the basic metadata fields (e.g., deviceId, model, status, and type)", + "Includes all metadata fields" + ], + "location": "query", + "type": "string" + }, + "query": { + "description": "Search string in the format given at https://developers.google.com/admin-sdk/directory/v1/search-operators", + "location": "query", + "type": "string" + }, + "sortOrder": { + "description": "Whether to return results in ascending or descending order. Must be used with the `orderBy` parameter.", + "enum": [ + "ASCENDING", + "DESCENDING" + ], + "enumDescriptions": [ + "Ascending order.", + "Descending order." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/devices/mobile", + "response": { + "$ref": "MobileDevices" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.device.mobile", + "https://www.googleapis.com/auth/admin.directory.device.mobile.action", + "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly" + ] + } + } + }, + "orgunits": { + "methods": { + "delete": { + "description": "Removes an organizational unit.", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + "httpMethod": "DELETE", + "id": "directory.orgunits.delete", + "parameterOrder": [ + "customerId", + "orgUnitPath" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + "location": "path", + "pattern": "^.*$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit" + ] + }, + "get": { + "description": "Retrieves an organizational unit.", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + "httpMethod": "GET", + "id": "directory.orgunits.get", + "parameterOrder": [ + "customerId", + "orgUnitPath" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + "location": "path", + "pattern": "^.*$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + "response": { + "$ref": "OrgUnit" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit", + "https://www.googleapis.com/auth/admin.directory.orgunit.readonly" + ] + }, + "insert": { + "description": "Adds an organizational unit.", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits", + "httpMethod": "POST", + "id": "directory.orgunits.insert", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits", + "request": { + "$ref": "OrgUnit" + }, + "response": { + "$ref": "OrgUnit" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit" + ] + }, + "list": { + "description": "Retrieves a list of all organizational units for an account.", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits", + "httpMethod": "GET", + "id": "directory.orgunits.list", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "default": "", + "description": "The full path to the organizational unit or its unique ID. 
Returns the children of the specified organizational unit.", + "location": "query", + "type": "string" + }, + "type": { + "description": "Whether to return all sub-organizations or just immediate children.", + "enum": [ + "all", + "children" + ], + "enumDescriptions": [ + "All sub-organizational units.", + "Immediate children only (default)." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits", + "response": { + "$ref": "OrgUnits" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit", + "https://www.googleapis.com/auth/admin.directory.orgunit.readonly" + ] + }, + "patch": { + "description": "Updates an organizational unit. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch)", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + "httpMethod": "PATCH", + "id": "directory.orgunits.patch", + "parameterOrder": [ + "customerId", + "orgUnitPath" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + "location": "path", + "pattern": "^.*$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + "request": { + "$ref": "OrgUnit" + }, + "response": { + "$ref": "OrgUnit" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit" + ] + }, + "update": { + "description": "Updates an organizational unit.", + "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + "httpMethod": "PUT", + "id": "directory.orgunits.update", + "parameterOrder": [ + "customerId", + "orgUnitPath" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + "location": "path", + "required": true, + "type": "string" + }, + "orgUnitPath": { + "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + "location": "path", + "pattern": "^.*$", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + "request": { + "$ref": "OrgUnit" + }, + "response": { + "$ref": "OrgUnit" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.orgunit" + ] + } + } + }, + "privileges": { + "methods": { + "list": { + "description": "Retrieves a paginated list of all privileges for a customer.", + "flatPath": "admin/directory/v1/customer/{customer}/roles/ALL/privileges", + "httpMethod": "GET", + "id": "directory.privileges.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. 
You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles/ALL/privileges", + "response": { + "$ref": "Privileges" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + ] + } + } + }, + "resources": { + "resources": { + "buildings": { + "methods": { + "delete": { + "description": "Deletes a building.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "httpMethod": "DELETE", + "id": "directory.resources.buildings.delete", + "parameterOrder": [ + "customer", + "buildingId" + ], + "parameters": { + "buildingId": { + "description": "The id of the building to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "get": { + "description": "Retrieves a building.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "httpMethod": "GET", + "id": "directory.resources.buildings.get", + "parameterOrder": [ + "customer", + "buildingId" + ], + "parameters": { + "buildingId": { + "description": "The unique ID of the building to retrieve.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "response": { + "$ref": "Building" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "insert": { + "description": "Inserts a building.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings", + "httpMethod": "POST", + "id": "directory.resources.buildings.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "coordinatesSource": { + "default": "SOURCE_UNSPECIFIED", + "description": "Source from which Building.coordinates are derived.", + "enum": [ + "CLIENT_SPECIFIED", + "RESOLVED_FROM_ADDRESS", + "SOURCE_UNSPECIFIED" + ], + "enumDescriptions": [ + "Building.coordinates are set to the coordinates included in the request.", + "Building.coordinates are automatically populated based on the postal address.", + "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + ], + "location": "query", + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings", + "request": { + "$ref": "Building" + }, + "response": { + "$ref": "Building" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "list": { + "description": "Retrieves a list of buildings for an account.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings", + "httpMethod": "GET", + "id": "directory.resources.buildings.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings", + "response": { + "$ref": "Buildings" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "patch": { + "description": "Patches a building.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "httpMethod": "PATCH", + "id": "directory.resources.buildings.patch", + "parameterOrder": [ + "customer", + "buildingId" + ], + "parameters": { + "buildingId": { + "description": "The id of the building to update.", + "location": "path", + "required": true, + "type": "string" + }, + "coordinatesSource": { + "default": "SOURCE_UNSPECIFIED", + "description": "Source from which Building.coordinates are derived.", + "enum": [ + "CLIENT_SPECIFIED", + "RESOLVED_FROM_ADDRESS", + "SOURCE_UNSPECIFIED" + ], + "enumDescriptions": [ + "Building.coordinates are set to the coordinates included in the request.", + "Building.coordinates are automatically populated based on the postal address.", + "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + ], + "location": "query", + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "request": { + "$ref": "Building" + }, + "response": { + "$ref": "Building" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "update": { + "description": "Updates a building.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "httpMethod": "PUT", + "id": "directory.resources.buildings.update", + "parameterOrder": [ + "customer", + "buildingId" + ], + "parameters": { + "buildingId": { + "description": "The id of the building to update.", + "location": "path", + "required": true, + "type": "string" + }, + "coordinatesSource": { + "default": "SOURCE_UNSPECIFIED", + "description": "Source from which Building.coordinates are derived.", + "enum": [ + "CLIENT_SPECIFIED", + "RESOLVED_FROM_ADDRESS", + "SOURCE_UNSPECIFIED" + ], + "enumDescriptions": [ + "Building.coordinates are set to the coordinates included in the request.", + "Building.coordinates are automatically populated based on the postal address.", + "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + ], + "location": "query", + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + "request": { + "$ref": "Building" + }, + "response": { + "$ref": "Building" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + } + } + }, + "calendars": { + "methods": { + "delete": { + "description": "Deletes a calendar resource.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "httpMethod": "DELETE", + "id": "directory.resources.calendars.delete", + "parameterOrder": [ + "customer", + "calendarResourceId" + ], + "parameters": { + "calendarResourceId": { + "description": "The unique ID of the calendar resource to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "get": { + "description": "Retrieves a calendar resource.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "httpMethod": "GET", + "id": "directory.resources.calendars.get", + "parameterOrder": [ + "customer", + "calendarResourceId" + ], + "parameters": { + "calendarResourceId": { + "description": "The unique ID of the calendar resource to retrieve.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "response": { + "$ref": "CalendarResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "insert": { + "description": "Inserts a calendar resource.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars", + "httpMethod": "POST", + "id": "directory.resources.calendars.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars", + "request": { + "$ref": "CalendarResource" + }, + "response": { + "$ref": "CalendarResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "list": { + "description": "Retrieves a list of calendar resources for an account.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars", + "httpMethod": "GET", + "id": "directory.resources.calendars.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Field(s) to sort results by in either ascending or descending order. Supported fields include `resourceId`, `resourceName`, `capacity`, `buildingId`, and `floorName`. If no order is specified, defaults to ascending. Should be of the form \"field [asc|desc], field [asc|desc], ...\". For example `buildingId, capacity desc` would return results sorted first by `buildingId` in ascending order then by `capacity` in descending order.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" + }, + "query": { + "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match, '!=' for mismatch and ':' for prefix match or HAS match where applicable. For prefix match, the value should always be followed by a *. Logical operators NOT and AND are supported (in this order of precedence). Supported fields include `generatedResourceName`, `name`, `buildingId`, `floor_name`, `capacity`, `featureInstances.feature.name`, `resourceEmail`, `resourceCategory`. 
For example `buildingId=US-NYC-9TH AND featureInstances.feature.name:Phone`.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars", + "response": { + "$ref": "CalendarResources" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "patch": { + "description": "Patches a calendar resource.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "httpMethod": "PATCH", + "id": "directory.resources.calendars.patch", + "parameterOrder": [ + "customer", + "calendarResourceId" + ], + "parameters": { + "calendarResourceId": { + "description": "The unique ID of the calendar resource to update.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "request": { + "$ref": "CalendarResource" + }, + "response": { + "$ref": "CalendarResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "update": { + "description": "Updates a calendar resource. This method supports patch semantics, meaning you only need to include the fields you wish to update. Fields that are not present in the request will be preserved.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "httpMethod": "PUT", + "id": "directory.resources.calendars.update", + "parameterOrder": [ + "customer", + "calendarResourceId" + ], + "parameters": { + "calendarResourceId": { + "description": "The unique ID of the calendar resource to update.", + "location": "path", + "required": true, + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + "request": { + "$ref": "CalendarResource" + }, + "response": { + "$ref": "CalendarResource" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + } + } + }, + "features": { + "methods": { + "delete": { + "description": "Deletes a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "httpMethod": "DELETE", + "id": "directory.resources.features.delete", + "parameterOrder": [ + "customer", + "featureKey" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "featureKey": { + "description": "The unique ID of the feature to delete.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "get": { + "description": "Retrieves a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "httpMethod": "GET", + "id": "directory.resources.features.get", + "parameterOrder": [ + "customer", + "featureKey" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "featureKey": { + "description": "The unique ID of the feature to retrieve.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "response": { + "$ref": "Feature" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "insert": { + "description": "Inserts a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features", + "httpMethod": "POST", + "id": "directory.resources.features.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features", + "request": { + "$ref": "Feature" + }, + "response": { + "$ref": "Feature" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "list": { + "description": "Retrieves a list of features for an account.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features", + "httpMethod": "GET", + "id": "directory.resources.features.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features", + "response": { + "$ref": "Features" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + ] + }, + "patch": { + "description": "Patches a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "httpMethod": "PATCH", + "id": "directory.resources.features.patch", + "parameterOrder": [ + "customer", + "featureKey" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "featureKey": { + "description": "The unique ID of the feature to update.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "request": { + "$ref": "Feature" + }, + "response": { + "$ref": "Feature" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "rename": { + "description": "Renames a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{oldName}/rename", + "httpMethod": "POST", + "id": "directory.resources.features.rename", + "parameterOrder": [ + "customer", + "oldName" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "oldName": { + "description": "The unique ID of the feature to rename.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features/{oldName}/rename", + "request": { + "$ref": "FeatureRename" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + }, + "update": { + "description": "Updates a feature.", + "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "httpMethod": "PUT", + "id": "directory.resources.features.update", + "parameterOrder": [ + "customer", + "featureKey" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + "location": "path", + "required": true, + "type": "string" + }, + "featureKey": { + "description": "The unique ID of the feature to update.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + "request": { + "$ref": "Feature" + }, + "response": { + "$ref": "Feature" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.resource.calendar" + ] + } + } + } + } + }, + "roleAssignments": { + "methods": { + "delete": { + "description": "Deletes a role assignment.", + "flatPath": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + "httpMethod": "DELETE", + "id": "directory.roleAssignments.delete", + "parameterOrder": [ + "customer", + "roleAssignmentId" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "roleAssignmentId": { + "description": "Immutable ID of the role assignment.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + }, + "get": { + "description": "Retrieves a role assignment.", + "flatPath": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + "httpMethod": "GET", + "id": "directory.roleAssignments.get", + "parameterOrder": [ + "customer", + "roleAssignmentId" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "roleAssignmentId": { + "description": "Immutable ID of the role assignment.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + "response": { + "$ref": "RoleAssignment" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + ] + }, + "insert": { + "description": "Creates a role assignment.", + "flatPath": "admin/directory/v1/customer/{customer}/roleassignments", + "httpMethod": "POST", + "id": "directory.roleAssignments.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roleassignments", + "request": { + "$ref": "RoleAssignment" + }, + "response": { + "$ref": "RoleAssignment" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + }, + "list": { + "description": "Retrieves a paginated list of all roleAssignments.", + "flatPath": "admin/directory/v1/customer/{customer}/roleassignments", + "httpMethod": "GET", + "id": "directory.roleAssignments.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "includeIndirectRoleAssignments": { + "description": "When set to `true`, fetches indirect role assignments (i.e. role assignment via a group) as well as direct ones. Defaults to `false`. You must specify `user_key` or the indirect role assignments will not be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "200", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" + }, + "roleId": { + "description": "Immutable ID of a role. If included in the request, returns only role assignments containing this role ID.", + "location": "query", + "type": "string" + }, + "userKey": { + "description": "The primary email address, alias email address, or unique user or group ID. 
If included in the request, returns role assignments only for this user or group.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roleassignments", + "response": { + "$ref": "RoleAssignments" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + ] + } + } + }, + "roles": { + "methods": { + "delete": { + "description": "Deletes a role.", + "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "httpMethod": "DELETE", + "id": "directory.roles.delete", + "parameterOrder": [ + "customer", + "roleId" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "roleId": { + "description": "Immutable ID of the role.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + }, + "get": { + "description": "Retrieves a role.", + "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "httpMethod": "GET", + "id": "directory.roles.get", + "parameterOrder": [ + "customer", + "roleId" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "roleId": { + "description": "Immutable ID of the role.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + ] + }, + "insert": { + "description": "Creates a role.", + "flatPath": "admin/directory/v1/customer/{customer}/roles", + "httpMethod": "POST", + "id": "directory.roles.insert", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles", + "request": { + "$ref": "Role" + }, + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + }, + "list": { + "description": "Retrieves a paginated list of all the roles in a domain.", + "flatPath": "admin/directory/v1/customer/{customer}/roles", + "httpMethod": "GET", + "id": "directory.roles.list", + "parameterOrder": [ + "customer" + ], + "parameters": { + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "100", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles", + "response": { + "$ref": "Roles" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + ] + }, + "patch": { + "description": "Patches a role.", + "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "httpMethod": "PATCH", + "id": "directory.roles.patch", + "parameterOrder": [ + "customer", + "roleId" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "roleId": { + "description": "Immutable ID of the role.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "request": { + "$ref": "Role" + }, + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + }, + "update": { + "description": "Updates a role.", + "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "httpMethod": "PUT", + "id": "directory.roles.update", + "parameterOrder": [ + "customer", + "roleId" + ], + "parameters": { + "customer": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "roleId": { + "description": "Immutable ID of the role.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customer}/roles/{roleId}", + "request": { + "$ref": "Role" + }, + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.rolemanagement" + ] + } + } + }, + "schemas": { + "methods": { + "delete": { + "description": "Deletes a schema.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "httpMethod": "DELETE", + "id": "directory.schemas.delete", + "parameterOrder": [ + "customerId", + "schemaKey" + ], + "parameters": { + "customerId": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "schemaKey": { + "description": "Name or immutable ID of the schema.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema" + ] + }, + "get": { + "description": "Retrieves a schema.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "httpMethod": "GET", + "id": "directory.schemas.get", + "parameterOrder": [ + "customerId", + "schemaKey" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. 
In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + }, + "schemaKey": { + "description": "Name or immutable ID of the schema.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "response": { + "$ref": "Schema" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema", + "https://www.googleapis.com/auth/admin.directory.userschema.readonly" + ] + }, + "insert": { + "description": "Creates a schema.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas", + "httpMethod": "POST", + "id": "directory.schemas.insert", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas", + "request": { + "$ref": "Schema" + }, + "response": { + "$ref": "Schema" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema" + ] + }, + "list": { + "description": "Retrieves all schemas for a customer.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas", + "httpMethod": "GET", + "id": "directory.schemas.list", + "parameterOrder": [ + "customerId" + ], + "parameters": { + "customerId": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas", + "response": { + "$ref": "Schemas" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema", + "https://www.googleapis.com/auth/admin.directory.userschema.readonly" + ] + }, + "patch": { + "description": "Patches a schema.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "httpMethod": "PATCH", + "id": "directory.schemas.patch", + "parameterOrder": [ + "customerId", + "schemaKey" + ], + "parameters": { + "customerId": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "schemaKey": { + "description": "Name or immutable ID of the schema.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "request": { + "$ref": "Schema" + }, + "response": { + "$ref": "Schema" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema" + ] + }, + "update": { + "description": "Updates a schema.", + "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "httpMethod": "PUT", + "id": "directory.schemas.update", + "parameterOrder": [ + "customerId", + "schemaKey" + ], + "parameters": { + "customerId": { + "description": "Immutable ID of the Google Workspace account.", + "location": "path", + "required": true, + "type": "string" + }, + "schemaKey": { + "description": "Name or immutable ID of the schema.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + "request": { + "$ref": "Schema" + }, + "response": { + "$ref": "Schema" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.userschema" + ] + } + } + }, + "tokens": { + "methods": { + "delete": { + "description": "Deletes all access tokens issued by a user for an application.", + "flatPath": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + "httpMethod": "DELETE", + "id": "directory.tokens.delete", + "parameterOrder": [ + "userKey", + "clientId" + ], + "parameters": { + "clientId": { + "description": "The Client ID of the application the token is issued to.", + "location": "path", + "required": true, + "type": "string" + }, + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "get": { + "description": "Gets information about an access token issued by a user.", + "flatPath": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + "httpMethod": "GET", + "id": "directory.tokens.get", + "parameterOrder": [ + "userKey", + "clientId" + ], + "parameters": { + "clientId": { + "description": "The Client ID of the application the token is issued to.", + "location": "path", + "required": true, + "type": "string" + }, + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + "response": { + "$ref": "Token" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "list": { + "description": "Returns the set of tokens the specified user has issued to 3rd party applications.", + "flatPath": "admin/directory/v1/users/{userKey}/tokens", + "httpMethod": "GET", + "id": "directory.tokens.list", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/tokens", + "response": { + "$ref": "Tokens" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + } + } + }, + "twoStepVerification": { + "methods": { + "turnOff": { + "description": "Turns off 2-Step Verification for the user.", + "flatPath": "admin/directory/v1/users/{userKey}/twoStepVerification/turnOff", + "httpMethod": "POST", + "id": "directory.twoStepVerification.turnOff", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/twoStepVerification/turnOff", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + } + } + }, + "users": { + "methods": { + "delete": { + "description": "Deletes a user.", + "flatPath": "admin/directory/v1/users/{userKey}", + "httpMethod": "DELETE", + "id": "directory.users.delete", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "get": { + "description": "Retrieves a user.", + "flatPath": "admin/directory/v1/users/{userKey}", + "httpMethod": "GET", + "id": "directory.users.get", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "customFieldMask": { + "description": "A comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when `projection=custom`.", + "location": "query", + "type": "string" + }, + "projection": { + "default": "basic", + "description": "What subset of fields to fetch for this user.", + "enum": [ + "basic", + "custom", + "full" + ], + "enumDescriptions": [ + "Do not include any custom fields for the user.", + "Include custom fields from schemas requested in `customFieldMask`.", + "Include all fields associated with this user." + ], + "location": "query", + "type": "string" + }, + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + }, + "viewType": { + "default": "admin_view", + "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + "enum": [ + "admin_view", + "domain_public" + ], + "enumDescriptions": [ + "Results include both administrator-only and domain-public fields for the user.", + "Results only include fields for the user that are publicly visible to other users in the domain." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}", + "response": { + "$ref": "User" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.readonly" + ] + }, + "insert": { + "description": "Creates a user. Mutate calls immediately following user creation might sometimes fail as the user isn't fully created due to propagation delay in our backends. Check the error details for the \"User creation is not complete\" message to see if this is the case. Retrying the calls after some time can help in this case.", + "flatPath": "admin/directory/v1/users", + "httpMethod": "POST", + "id": "directory.users.insert", + "parameterOrder": [], + "parameters": {}, + "path": "admin/directory/v1/users", + "request": { + "$ref": "User" + }, + "response": { + "$ref": "User" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "list": { + "description": "Retrieves a paginated list of either deleted users or all users in a domain.", + "flatPath": "admin/directory/v1/users", + "httpMethod": "GET", + "id": "directory.users.list", + "parameterOrder": [], + "parameters": { + "customFieldMask": { + "description": "A comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when `projection=custom`.", + "location": "query", + "type": "string" + }, + "customer": { + "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + "location": "query", + "type": "string" + }, + "domain": { + "description": "The domain name. Use this field to get groups from only one domain. To return all domains for a customer account, use the `customer` query parameter instead. 
Either the `customer` or the `domain` parameter must be provided.", + "location": "query", + "type": "string" + }, + "event": { + "description": "Event on which subscription is intended (if subscribing)", + "enum": [ + "add", + "delete", + "makeAdmin", + "undelete", + "update" + ], + "enumDescriptions": [ + "User Created Event", + "User Deleted Event", + "User Admin Status Change Event", + "User Undeleted Event", + "User Updated Event" + ], + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "100", + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Property to use for sorting results.", + "enum": [ + "email", + "familyName", + "givenName" + ], + "enumDescriptions": [ + "Primary email of the user.", + "User's family name.", + "User's given name." + ], + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Token to specify next page in the list", + "location": "query", + "type": "string" + }, + "projection": { + "default": "basic", + "description": "What subset of fields to fetch for this user.", + "enum": [ + "basic", + "custom", + "full" + ], + "enumDescriptions": [ + "Do not include any custom fields for the user.", + "Include custom fields from schemas requested in `customFieldMask`.", + "Include all fields associated with this user." + ], + "location": "query", + "type": "string" + }, + "query": { + "description": "Query string for searching user fields. For more information on constructing user queries, see [Search for Users](/admin-sdk/directory/v1/guides/search-users).", + "location": "query", + "type": "string" + }, + "showDeleted": { + "description": "If set to `true`, retrieves the list of deleted users. (Default: `false`)", + "location": "query", + "type": "string" + }, + "sortOrder": { + "description": "Whether to return results in ascending or descending order, ignoring case.", + "enum": [ + "ASCENDING", + "DESCENDING" + ], + "enumDescriptions": [ + "Ascending order.", + "Descending order." + ], + "location": "query", + "type": "string" + }, + "viewType": { + "default": "admin_view", + "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + "enum": [ + "admin_view", + "domain_public" + ], + "enumDescriptions": [ + "Results include both administrator-only and domain-public fields for the user.", + "Results only include fields for the user that are publicly visible to other users in the domain." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/users", + "response": { + "$ref": "Users" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "makeAdmin": { + "description": "Makes a user a super administrator.", + "flatPath": "admin/directory/v1/users/{userKey}/makeAdmin", + "httpMethod": "POST", + "id": "directory.users.makeAdmin", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/makeAdmin", + "request": { + "$ref": "UserMakeAdmin" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "patch": { + "description": "Updates a user using patch semantics. The update method should be used instead, because it also supports patch semantics and has better performance. If you're mapping an external identity to a Google identity, use the [`update`](https://developers.google.com/admin-sdk/directory/v1/reference/users/update) method instead of the `patch` method. This method is unable to clear fields that contain repeated objects (`addresses`, `phones`, etc.). Use the update method instead.", + "flatPath": "admin/directory/v1/users/{userKey}", + "httpMethod": "PATCH", + "id": "directory.users.patch", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}", + "request": { + "$ref": "User" + }, + "response": { + "$ref": "User" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "signOut": { + "description": "Signs a user out of all web and device sessions and resets their sign-in cookies. The user will have to sign in by authenticating again.", + "flatPath": "admin/directory/v1/users/{userKey}/signOut", + "httpMethod": "POST", + "id": "directory.users.signOut", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the target user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/signOut", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "undelete": { + "description": "Undeletes a deleted user.", + "flatPath": "admin/directory/v1/users/{userKey}/undelete", + "httpMethod": "POST", + "id": "directory.users.undelete", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "The immutable id of the user", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/undelete", + "request": { + "$ref": "UserUndelete" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "update": { + "description": "Updates a user. This method supports patch semantics, meaning that you only need to include the fields you wish to update. Fields that are not present in the request will be preserved, and fields set to `null` will be cleared. For repeating fields that contain arrays, individual items in the array can't be patched piecemeal; they must be supplied in the request body with the desired values for all items. 
See the [user accounts guide](https://developers.google.com/admin-sdk/directory/v1/guides/manage-users#update_user) for more information.", + "flatPath": "admin/directory/v1/users/{userKey}", + "httpMethod": "PUT", + "id": "directory.users.update", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}", + "request": { + "$ref": "User" + }, + "response": { + "$ref": "User" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "watch": { + "description": "Watches for changes in users list.", + "flatPath": "admin/directory/v1/users/watch", + "httpMethod": "POST", + "id": "directory.users.watch", + "parameterOrder": [], + "parameters": { + "customFieldMask": { + "description": "Comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when projection=custom.", + "location": "query", + "type": "string" + }, + "customer": { + "description": "Immutable ID of the Google Workspace account. In case of multi-domain, to fetch all users for a customer, fill this field instead of domain.", + "location": "query", + "type": "string" + }, + "domain": { + "description": "Name of the domain. Fill this field to get users from only this domain. To return all users in a multi-domain account, fill the customer field instead.", + "location": "query", + "type": "string" + }, + "event": { + "description": "Events to watch for.", + "enum": [ + "add", + "delete", + "makeAdmin", + "undelete", + "update" + ], + "enumDescriptions": [ + "User Created Event", + "User Deleted Event", + "User Admin Status Change Event", + "User Undeleted Event", + "User Updated Event" + ], + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "100", + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "orderBy": { + "description": "Column to use for sorting results", + "enum": [ + "email", + "familyName", + "givenName" + ], + "enumDescriptions": [ + "Primary email of the user.", + "User's family name.", + "User's given name." + ], + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Token to specify next page in the list", + "location": "query", + "type": "string" + }, + "projection": { + "default": "basic", + "description": "What subset of fields to fetch for this user.", + "enum": [ + "basic", + "custom", + "full" + ], + "enumDescriptions": [ + "Do not include any custom fields for the user.", + "Include custom fields from schemas mentioned in customFieldMask.", + "Include all fields associated with this user." + ], + "location": "query", + "type": "string" + }, + "query": { + "description": "Query string search. Should be of the form \"\". Complete documentation is at https://developers.google.com/admin-sdk/directory/v1/guides/search-users", + "location": "query", + "type": "string" + }, + "showDeleted": { + "description": "If set to true, retrieves the list of deleted users. 
(Default: false)", + "location": "query", + "type": "string" + }, + "sortOrder": { + "description": "Whether to return results in ascending or descending order.", + "enum": [ + "ASCENDING", + "DESCENDING" + ], + "enumDescriptions": [ + "Ascending order.", + "Descending order." + ], + "location": "query", + "type": "string" + }, + "viewType": { + "default": "admin_view", + "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + "enum": [ + "admin_view", + "domain_public" + ], + "enumDescriptions": [ + "Results include both administrator-only and domain-public fields.", + "Results only include fields for the user that are publicly visible to other users in the domain." + ], + "location": "query", + "type": "string" + } + }, + "path": "admin/directory/v1/users/watch", + "request": { + "$ref": "Channel" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "aliases": { + "methods": { + "delete": { + "description": "Removes an alias.", + "flatPath": "admin/directory/v1/users/{userKey}/aliases/{alias}", + "httpMethod": "DELETE", + "id": "directory.users.aliases.delete", + "parameterOrder": [ + "userKey", + "alias" + ], + "parameters": { + "alias": { + "description": "The alias to be removed.", + "location": "path", + "required": true, + "type": "string" + }, + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/aliases/{alias}", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias" + ] + }, + "insert": { + "description": "Adds an alias.", + "flatPath": "admin/directory/v1/users/{userKey}/aliases", + "httpMethod": "POST", + "id": "directory.users.aliases.insert", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/aliases", + "request": { + "$ref": "Alias" + }, + "response": { + "$ref": "Alias" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias" + ] + }, + "list": { + "description": "Lists all aliases for a user.", + "flatPath": "admin/directory/v1/users/{userKey}/aliases", + "httpMethod": "GET", + "id": "directory.users.aliases.list", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "event": { + "description": "Events to watch for.", + "enum": [ + "add", + "delete" + ], + "enumDescriptions": [ + "Alias Created Event", + "Alias Deleted Event" + ], + "location": "query", + "type": "string" + }, + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/aliases", + "response": { + "$ref": "Aliases" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias", + "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly" + ] + }, + "watch": { + "description": "Watches for changes in users list.", + "flatPath": "admin/directory/v1/users/{userKey}/aliases/watch", + "httpMethod": "POST", + "id": "directory.users.aliases.watch", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "event": { + "description": "Events to watch for.", + "enum": [ + "add", + "delete" + ], + "enumDescriptions": [ + "Alias Created Event", + "Alias Deleted Event" + ], + "location": "query", + "type": "string" + }, + "userKey": { + "description": "Email or immutable ID of the user", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/aliases/watch", + "request": { + "$ref": "Channel" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias", + "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly" + ] + } + } + }, + "photos": { + "methods": { + "delete": { + "description": "Removes the user's photo.", + "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "httpMethod": "DELETE", + "id": "directory.users.photos.delete", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "get": { + "description": "Retrieves the user's photo.", + "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "httpMethod": "GET", + "id": "directory.users.photos.get", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "response": { + "$ref": "UserPhoto" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.readonly" + ] + }, + "patch": { + "description": "Adds a photo for the user. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "httpMethod": "PATCH", + "id": "directory.users.photos.patch", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "request": { + "$ref": "UserPhoto" + }, + "response": { + "$ref": "UserPhoto" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + }, + "update": { + "description": "Adds a photo for the user.", + "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "httpMethod": "PUT", + "id": "directory.users.photos.update", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + "request": { + "$ref": "UserPhoto" + }, + "response": { + "$ref": "UserPhoto" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user" + ] + } + } + } + } + }, + "verificationCodes": { + "methods": { + "generate": { + "description": "Generates new backup verification codes for the user.", + "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes/generate", + "httpMethod": "POST", + "id": "directory.verificationCodes.generate", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Email or immutable ID of the user", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/verificationCodes/generate", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "invalidate": { + "description": "Invalidates the current backup verification codes for the user.", + "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes/invalidate", + "httpMethod": "POST", + "id": "directory.verificationCodes.invalidate", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Email or immutable ID of the user", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/verificationCodes/invalidate", + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + }, + "list": { + "description": "Returns the current set of valid backup verification codes for the specified user.", + "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes", + "httpMethod": "GET", + "id": "directory.verificationCodes.list", + "parameterOrder": [ + "userKey" + ], + "parameters": { + "userKey": { + "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "admin/directory/v1/users/{userKey}/verificationCodes", + "response": { + "$ref": "VerificationCodes" + }, + "scopes": [ + "https://www.googleapis.com/auth/admin.directory.user.security" + ] + } + } + } + }, + "revision": "20230724", + "rootUrl": "https://admin.googleapis.com/", + "schemas": { + "Alias": { + "description": "JSON template for Alias object in Directory API.", + "id": "Alias", + "properties": { + "alias": { + "type": "string" + }, + "etag": { + "type": "string" + }, + "id": { + "type": "string" + }, + "kind": { + "default": "admin#directory#alias", + "type": "string" + }, + "primaryEmail": { + "type": "string" + } + }, + "type": "object" + }, + "Aliases": { + "description": "JSON response template to list aliases in Directory API.", + "id": "Aliases", + "properties": { + "aliases": { + "items": { + "type": "any" + }, + "type": "array" + }, + "etag": { + "type": "string" + }, + "kind": { + "default": "admin#directory#aliases", + "type": "string" + } + }, + "type": "object" + }, + "Asp": { + "description": "An application-specific password (ASP) is used with applications that do not accept a verification code when logging into the application on certain devices. The ASP access code is used instead of the login and password you commonly use when accessing an application through a browser. For more information about ASPs and how to create one, see the [help center](https://support.google.com/a/answer/2537800#asp).", + "id": "Asp", + "properties": { + "codeId": { + "description": "The unique ID of the ASP.", + "format": "int32", + "type": "integer" + }, + "creationTime": { + "description": "The time when the ASP was created. Expressed in [Unix time](https://en.wikipedia.org/wiki/Epoch_time) format.", + "format": "int64", + "type": "string" + }, + "etag": { + "description": "ETag of the ASP.", + "type": "string" + }, + "kind": { + "default": "admin#directory#asp", + "description": "The type of the API resource. This is always `admin#directory#asp`.", + "type": "string" + }, + "lastTimeUsed": { + "description": "The time when the ASP was last used. Expressed in [Unix time](https://en.wikipedia.org/wiki/Epoch_time) format.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the application that the user, represented by their `userId`, entered when the ASP was created.", + "type": "string" + }, + "userKey": { + "description": "The unique ID of the user who issued the ASP.", + "type": "string" + } + }, + "type": "object" + }, + "Asps": { + "id": "Asps", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of ASP resources.", + "items": { + "$ref": "Asp" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#aspList", + "description": "The type of the API resource. This is always `admin#directory#aspList`.", + "type": "string" + } + }, + "type": "object" + }, + "AuxiliaryMessage": { + "description": "Auxiliary message about issues with printers or settings. Example: {message_type:AUXILIARY_MESSAGE_WARNING, field_mask:make_and_model, message:\"Given printer is invalid or no longer supported.\"}", + "id": "AuxiliaryMessage", + "properties": { + "auxiliaryMessage": { + "description": "Human readable message in English. 
Example: \"Given printer is invalid or no longer supported.\"", + "type": "string" + }, + "fieldMask": { + "description": "Field that this message concerns.", + "format": "google-fieldmask", + "type": "string" + }, + "severity": { + "description": "Message severity", + "enum": [ + "SEVERITY_UNSPECIFIED", + "SEVERITY_INFO", + "SEVERITY_WARNING", + "SEVERITY_ERROR" + ], + "enumDescriptions": [ + "Message type unspecified.", + "Message of severity: info.", + "Message of severity: warning.", + "Message of severity: error." + ], + "type": "string" + } + }, + "type": "object" + }, + "BatchCreatePrintServersRequest": { + "description": "Request to add multiple new print servers in a batch.", + "id": "BatchCreatePrintServersRequest", + "properties": { + "requests": { + "description": "Required. A list of `PrintServer` resources to be created (max `50` per batch).", + "items": { + "$ref": "CreatePrintServerRequest" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchCreatePrintServersResponse": { + "id": "BatchCreatePrintServersResponse", + "properties": { + "failures": { + "description": "A list of create failures. `PrintServer` IDs are not populated, as print servers were not created.", + "items": { + "$ref": "PrintServerFailureInfo" + }, + "type": "array" + }, + "printServers": { + "description": "A list of successfully created print servers with their IDs populated.", + "items": { + "$ref": "PrintServer" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchCreatePrintersRequest": { + "description": "Request for adding new printers in batch.", + "id": "BatchCreatePrintersRequest", + "properties": { + "requests": { + "description": "A list of Printers to be created. Max 50 at a time.", + "items": { + "$ref": "CreatePrinterRequest" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchCreatePrintersResponse": { + "description": "Response for adding new printers in batch.", + "id": "BatchCreatePrintersResponse", + "properties": { + "failures": { + "description": "A list of create failures. Printer IDs are not populated, as printer were not created.", + "items": { + "$ref": "FailureInfo" + }, + "type": "array" + }, + "printers": { + "description": "A list of successfully created printers with their IDs populated.", + "items": { + "$ref": "Printer" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchDeletePrintServersRequest": { + "description": "Request to delete multiple existing print servers in a batch.", + "id": "BatchDeletePrintServersRequest", + "properties": { + "printServerIds": { + "description": "A list of print server IDs that should be deleted (max `100` per batch).", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchDeletePrintServersResponse": { + "id": "BatchDeletePrintServersResponse", + "properties": { + "failedPrintServers": { + "description": "A list of update failures.", + "items": { + "$ref": "PrintServerFailureInfo" + }, + "type": "array" + }, + "printServerIds": { + "description": "A list of print server IDs that were successfully deleted.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchDeletePrintersRequest": { + "description": "Request for deleting existing printers in batch.", + "id": "BatchDeletePrintersRequest", + "properties": { + "printerIds": { + "description": "A list of Printer.id that should be deleted. 
Max 100 at a time.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BatchDeletePrintersResponse": { + "description": "Response for deleting existing printers in batch.", + "id": "BatchDeletePrintersResponse", + "properties": { + "failedPrinters": { + "description": "A list of delete failures.", + "items": { + "$ref": "FailureInfo" + }, + "type": "array" + }, + "printerIds": { + "description": "A list of Printer.id that were successfully deleted.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Building": { + "description": "Public API: Resources.buildings", + "id": "Building", + "properties": { + "address": { + "$ref": "BuildingAddress", + "description": "The postal address of the building. See [`PostalAddress`](/my-business/reference/rest/v4/PostalAddress) for details. Note that only a single address line and region code are required." + }, + "buildingId": { + "description": "Unique identifier for the building. The maximum length is 100 characters.", + "type": "string" + }, + "buildingName": { + "description": "The building name as seen by users in Calendar. Must be unique for the customer. For example, \"NYC-CHEL\". The maximum length is 100 characters.", + "type": "string" + }, + "coordinates": { + "$ref": "BuildingCoordinates", + "description": "The geographic coordinates of the center of the building, expressed as latitude and longitude in decimal degrees." + }, + "description": { + "description": "A brief description of the building. For example, \"Chelsea Market\".", + "type": "string" + }, + "etags": { + "description": "ETag of the resource.", + "type": "string" + }, + "floorNames": { + "description": "The display names for all floors in this building. The floors are expected to be sorted in ascending order, from lowest floor to highest floor. For example, [\"B2\", \"B1\", \"L\", \"1\", \"2\", \"2M\", \"3\", \"PH\"]. Must contain at least one entry.", + "items": { + "type": "string" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#resources#buildings#Building", + "description": "Kind of resource this is.", + "type": "string" + } + }, + "type": "object" + }, + "BuildingAddress": { + "description": "Public API: Resources.buildings", + "id": "BuildingAddress", + "properties": { + "addressLines": { + "description": "Unstructured address lines describing the lower levels of an address.", + "items": { + "type": "string" + }, + "type": "array" + }, + "administrativeArea": { + "description": "Optional. Highest administrative subdivision which is used for postal addresses of a country or region.", + "type": "string" + }, + "languageCode": { + "description": "Optional. BCP-47 language code of the contents of this address (if known).", + "type": "string" + }, + "locality": { + "description": "Optional. Generally refers to the city/town portion of the address. Examples: US city, IT comune, UK post town. In regions of the world where localities are not well defined or do not fit into this structure well, leave locality empty and use addressLines.", + "type": "string" + }, + "postalCode": { + "description": "Optional. Postal code of the address.", + "type": "string" + }, + "regionCode": { + "description": "Required. CLDR region code of the country/region of the address.", + "type": "string" + }, + "sublocality": { + "description": "Optional. 
Sublocality of the address.", + "type": "string" + } + }, + "type": "object" + }, + "BuildingCoordinates": { + "description": "Public API: Resources.buildings", + "id": "BuildingCoordinates", + "properties": { + "latitude": { + "description": "Latitude in decimal degrees.", + "format": "double", + "type": "number" + }, + "longitude": { + "description": "Longitude in decimal degrees.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "Buildings": { + "description": "Public API: Resources.buildings", + "id": "Buildings", + "properties": { + "buildings": { + "description": "The Buildings in this page of results.", + "items": { + "$ref": "Building" + }, + "type": "array" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#resources#buildings#buildingsList", + "description": "Kind of resource this is.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "CalendarResource": { + "description": "Public API: Resources.calendars", + "id": "CalendarResource", + "properties": { + "buildingId": { + "description": "Unique ID for the building a resource is located in.", + "type": "string" + }, + "capacity": { + "description": "Capacity of a resource, number of seats in a room.", + "format": "int32", + "type": "integer" + }, + "etags": { + "description": "ETag of the resource.", + "type": "string" + }, + "featureInstances": { + "description": "Instances of features for the calendar resource.", + "type": "any" + }, + "floorName": { + "description": "Name of the floor a resource is located on.", + "type": "string" + }, + "floorSection": { + "description": "Name of the section within a floor a resource is located in.", + "type": "string" + }, + "generatedResourceName": { + "description": "The read-only auto-generated name of the calendar resource which includes metadata about the resource such as building name, floor, capacity, etc. For example, \"NYC-2-Training Room 1A (16)\".", + "type": "string" + }, + "kind": { + "default": "admin#directory#resources#calendars#CalendarResource", + "description": "The type of the resource. For calendar resources, the value is `admin#directory#resources#calendars#CalendarResource`.", + "type": "string" + }, + "resourceCategory": { + "description": "The category of the calendar resource. Either CONFERENCE_ROOM or OTHER. Legacy data is set to CATEGORY_UNKNOWN.", + "type": "string" + }, + "resourceDescription": { + "description": "Description of the resource, visible only to admins.", + "type": "string" + }, + "resourceEmail": { + "description": "The read-only email for the calendar resource. Generated as part of creating a new calendar resource.", + "type": "string" + }, + "resourceId": { + "annotations": { + "required": [ + "directory.resources.calendars.insert" + ] + }, + "description": "The unique ID for the calendar resource.", + "type": "string" + }, + "resourceName": { + "annotations": { + "required": [ + "directory.resources.calendars.insert" + ] + }, + "description": "The name of the calendar resource. 
For example, \"Training Room 1A\".", + "type": "string" + }, + "resourceType": { + "description": "The type of the calendar resource, intended for non-room resources.", + "type": "string" + }, + "userVisibleDescription": { + "description": "Description of the resource, visible to users and admins.", + "type": "string" + } + }, + "type": "object" + }, + "CalendarResources": { + "description": "Public API: Resources.calendars", + "id": "CalendarResources", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "The CalendarResources in this page of results.", + "items": { + "$ref": "CalendarResource" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#resources#calendars#calendarResourcesList", + "description": "Identifies this as a collection of CalendarResources. This is always `admin#directory#resources#calendars#calendarResourcesList`.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Channel": { + "description": "An notification channel used to watch for resource changes.", + "id": "Channel", + "properties": { + "address": { + "description": "The address where notifications are delivered for this channel.", + "type": "string" + }, + "expiration": { + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "A UUID or similar unique string that identifies this channel.", + "type": "string" + }, + "kind": { + "default": "api#channel", + "description": "Identifies this as a notification channel used to watch for changes to a resource, which is `api#channel`.", + "type": "string" + }, + "params": { + "additionalProperties": { + "type": "string" + }, + "description": "Additional parameters controlling delivery channel behavior. Optional. For example, `params.ttl` specifies the time-to-live in seconds for the notification channel, where the default is 2 hours and the maximum TTL is 2 days.", + "type": "object" + }, + "payload": { + "description": "A Boolean value to indicate whether payload is wanted. Optional.", + "type": "boolean" + }, + "resourceId": { + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.", + "type": "string" + }, + "resourceUri": { + "description": "A version-specific identifier for the watched resource.", + "type": "string" + }, + "token": { + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.", + "type": "string" + }, + "type": { + "description": "The type of delivery mechanism used for this channel.", + "type": "string" + } + }, + "type": "object" + }, + "ChromeOsDevice": { + "description": "Google Chrome devices run on the [Chrome OS](https://support.google.com/chromeos). 
For more information about common API tasks, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-chrome-devices).", + "id": "ChromeOsDevice", + "properties": { + "activeTimeRanges": { + "description": "A list of active time ranges (Read-only).", + "items": { + "properties": { + "activeTime": { + "description": "Duration of usage in milliseconds.", + "format": "int32", + "type": "integer" + }, + "date": { + "description": "Date of usage", + "format": "date", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "annotatedAssetId": { + "description": "The asset identifier as noted by an administrator or specified during enrollment.", + "type": "string" + }, + "annotatedLocation": { + "description": "The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed.", + "type": "string" + }, + "annotatedUser": { + "description": "The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed.", + "type": "string" + }, + "autoUpdateExpiration": { + "description": "(Read-only) The timestamp after which the device will stop receiving Chrome updates or support", + "format": "int64", + "type": "string" + }, + "bootMode": { + "description": "The boot mode for the device. The possible values are: * `Verified`: The device is running a valid version of the Chrome OS. * `Dev`: The device's developer hardware switch is enabled. When booted, the device has a command line shell. For an example of a developer switch, see the [Chromebook developer information](https://www.chromium.org/chromium-os/developer-information-for-chrome-os-devices/samsung-series-5-chromebook#TOC-Developer-switch).", + "type": "string" + }, + "cpuInfo": { + "description": "Information regarding CPU specs in the device.", + "items": { + "description": "CPU specs for a CPU.", + "properties": { + "architecture": { + "description": "The CPU architecture.", + "type": "string" + }, + "logicalCpus": { + "description": "Information for the Logical CPUs", + "items": { + "description": "Status of a single logical CPU.", + "properties": { + "cStates": { + "description": "C-States indicate the power consumption state of the CPU. For more information, see the documentation published by the CPU maker.", + "items": { + "description": "Status of a single C-state. 
C-states are various modes the CPU can transition to in order to use more or less power.", + "properties": { + "displayName": { + "description": "Name of the state.", + "type": "string" + }, + "sessionDuration": { + "description": "Time spent in the state since the last reboot.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "currentScalingFrequencyKhz": { + "description": "Current frequency the CPU is running at.", + "format": "int32", + "type": "integer" + }, + "idleDuration": { + "description": "Idle time since last boot.", + "format": "google-duration", + "type": "string" + }, + "maxScalingFrequencyKhz": { + "description": "Maximum frequency the CPU is allowed to run at, by policy.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "type": "array" + }, + "maxClockSpeedKhz": { + "description": "The max CPU clock speed in kHz.", + "format": "int32", + "type": "integer" + }, + "model": { + "description": "The CPU model name.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "cpuStatusReports": { + "description": "Reports of CPU utilization and temperature (Read-only)", + "items": { + "properties": { + "cpuTemperatureInfo": { + "description": "A list of CPU temperature samples.", + "items": { + "properties": { + "label": { + "description": "CPU label", + "type": "string" + }, + "temperature": { + "description": "Temperature in Celsius degrees.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "type": "array" + }, + "cpuUtilizationPercentageInfo": { + "items": { + "format": "int32", + "type": "integer" + }, + "type": "array" + }, + "reportTime": { + "description": "Date and time the report was received.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "deprovisionReason": { + "description": "(Read-only) Deprovision reason.", + "enum": [ + "deprovisionReasonUnspecified", + "deprovisionReasonSameModelReplacement", + "deprovisionReasonUpgrade", + "deprovisionReasonDomainMove", + "deprovisionReasonServiceExpiration", + "deprovisionReasonOther", + "deprovisionReasonDifferentModelReplacement", + "deprovisionReasonRetiringDevice", + "deprovisionReasonUpgradeTransfer", + "deprovisionReasonNotRequired", + "deprovisionReasonRepairCenter" + ], + "enumDescriptions": [ + "The deprovision reason is unknown.", + "The device was replaced by a device with the same model.", + "The device was upgraded.", + "The device's domain was changed.", + "Service expired for the device.", + "The device was deprovisioned for a legacy reason that is no longer supported.", + "The device was replaced by a device with a different model.", + "The device was retired.", + "The device's perpetual upgrade was transferred to a new device.", + "A reason was not required. For example, the licenses were returned to the customer's license pool.", + "The device was deprovisioned by a repair service center." 
+ ], + "type": "string" + }, + "deviceFiles": { + "description": "A list of device files to download (Read-only)", + "items": { + "properties": { + "createTime": { + "description": "Date and time the file was created", + "format": "date-time", + "type": "string" + }, + "downloadUrl": { + "description": "File download URL", + "type": "string" + }, + "name": { + "description": "File name", + "type": "string" + }, + "type": { + "description": "File type", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "deviceId": { + "description": "The unique ID of the Chrome device.", + "type": "string" + }, + "deviceLicenseType": { + "description": "Output only. Device license type.", + "enum": [ + "deviceLicenseTypeUnspecified", + "enterprise", + "enterpriseUpgrade", + "educationUpgrade", + "education", + "terminal", + "kioskUpgrade" + ], + "enumDescriptions": [ + "UNSPECIFIED type.", + "Indicating the device is a Chromebook/Chromebox/Chromebase enterprise, which is packaged with an upgrade(license).", + "Indicating the device is consuming standalone Chrome Enterprise Upgrade, a Chrome Enterprise license.", + "Indicating the device is consuming Chrome Education Upgrade(AKA Chrome EDU perpetual license).", + "Packaged with a license as education.", + "Packaged with a license as terminal.", + "Indicating the device is consuming standalone Chrome Kiosk Upgrade, a Chrome Kiosk (annual) license." + ], + "readOnly": true, + "type": "string" + }, + "diskVolumeReports": { + "description": "Reports of disk space and other info about mounted/connected volumes.", + "items": { + "properties": { + "volumeInfo": { + "description": "Disk volumes", + "items": { + "properties": { + "storageFree": { + "description": "Free disk space [in bytes]", + "format": "int64", + "type": "string" + }, + "storageTotal": { + "description": "Total disk space [in bytes]", + "format": "int64", + "type": "string" + }, + "volumeId": { + "description": "Volume id", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "dockMacAddress": { + "description": "(Read-only) Built-in MAC address for the docking station that the device connected to. Factory sets Media access control address (MAC address) assigned for use by a dock. It is reserved specifically for MAC pass through device policy. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "ethernetMacAddress": { + "description": "The device's MAC address on the ethernet network interface.", + "type": "string" + }, + "ethernetMacAddress0": { + "description": "(Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices.", + "type": "string" + }, + "firmwareVersion": { + "description": "The Chrome device's firmware version.", + "type": "string" + }, + "firstEnrollmentTime": { + "description": "Date and time for the first time the device was enrolled.", + "type": "string" + }, + "kind": { + "default": "admin#directory#chromeosdevice", + "description": "The type of resource. 
For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`.", + "type": "string" + }, + "lastDeprovisionTimestamp": { + "description": "(Read-only) Date and time for the last deprovision of the device.", + "type": "string" + }, + "lastEnrollmentTime": { + "description": "Date and time the device was last enrolled (Read-only)", + "format": "date-time", + "type": "string" + }, + "lastKnownNetwork": { + "description": "Contains last known network (Read-only)", + "items": { + "description": "Information for an ip address.", + "properties": { + "ipAddress": { + "description": "The IP address.", + "type": "string" + }, + "wanIpAddress": { + "description": "The WAN IP address.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "lastSync": { + "description": "Date and time the device was last synchronized with the policy settings in the G Suite administrator control panel (Read-only)", + "format": "date-time", + "type": "string" + }, + "macAddress": { + "description": "The device's wireless MAC address. If the device does not have this information, it is not included in the response.", + "type": "string" + }, + "manufactureDate": { + "description": "(Read-only) The date the device was manufactured in yyyy-mm-dd format.", + "type": "string" + }, + "meid": { + "description": "The Mobile Equipment Identifier (MEID) or the International Mobile Equipment Identity (IMEI) for the 3G mobile card in a mobile device. A MEID/IMEI is typically used when adding a device to a wireless carrier's post-pay service plan. If the device does not have this information, this property is not included in the response. For more information on how to export a MEID/IMEI list, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-chrome-devices.html#export_meid).", + "type": "string" + }, + "model": { + "description": "The device's model information. If the device does not have this information, this property is not included in the response.", + "type": "string" + }, + "notes": { + "description": "Notes about this device added by the administrator. This property can be [searched](https://support.google.com/chrome/a/answer/1698333) with the [list](/admin-sdk/directory/v1/reference/chromeosdevices/list) method's `query` parameter. Maximum length is 500 characters. Empty values are allowed.", + "type": "string" + }, + "orderNumber": { + "description": "The device's order number. Only devices directly purchased from Google have an order number.", + "type": "string" + }, + "orgUnitId": { + "description": "The unique ID of the organizational unit. orgUnitPath is the human readable version of orgUnitId. While orgUnitPath may change by renaming an organizational unit within the path, orgUnitId is unchangeable for one organizational unit. This property can be [updated](/admin-sdk/directory/v1/guides/manage-chrome-devices#move_chrome_devices_to_ou) using the API. For more information about how to create an organizational structure for your device, see the [administration help center](https://support.google.com/a/answer/182433).", + "type": "string" + }, + "orgUnitPath": { + "description": "The full parent path with the organizational unit's name associated with the device. Path names are case insensitive. If the parent organizational unit is the top-level organization, it is represented as a forward slash, `/`. This property can be [updated](/admin-sdk/directory/v1/guides/manage-chrome-devices#move_chrome_devices_to_ou) using the API. 
For more information about how to create an organizational structure for your device, see the [administration help center](https://support.google.com/a/answer/182433).", + "type": "string" + }, + "osUpdateStatus": { + "$ref": "OsUpdateStatus", + "description": "The status of the OS updates for the device." + }, + "osVersion": { + "description": "The Chrome device's operating system version.", + "type": "string" + }, + "platformVersion": { + "description": "The Chrome device's platform version.", + "type": "string" + }, + "recentUsers": { + "description": "A list of recent device users, in descending order, by last login time.", + "items": { + "description": "A list of recent device users, in descending order, by last login time.", + "properties": { + "email": { + "description": "The user's email address. This is only present if the user type is `USER_TYPE_MANAGED`.", + "type": "string" + }, + "type": { + "description": "The type of the user.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "screenshotFiles": { + "description": "A list of screenshot files to download. Type is always \"SCREENSHOT_FILE\". (Read-only)", + "items": { + "properties": { + "createTime": { + "description": "Date and time the file was created", + "format": "date-time", + "type": "string" + }, + "downloadUrl": { + "description": "File download URL", + "type": "string" + }, + "name": { + "description": "File name", + "type": "string" + }, + "type": { + "description": "File type", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "serialNumber": { + "description": "The Chrome device serial number entered when the device was enabled. This value is the same as the Admin console's *Serial Number* in the *Chrome OS Devices* tab.", + "type": "string" + }, + "status": { + "description": "The status of the device.", + "type": "string" + }, + "supportEndDate": { + "description": "Final date the device will be supported (Read-only)", + "format": "date-time", + "type": "string" + }, + "systemRamFreeReports": { + "description": "Reports of amounts of available RAM memory (Read-only)", + "items": { + "properties": { + "reportTime": { + "description": "Date and time the report was received.", + "format": "date-time", + "type": "string" + }, + "systemRamFreeInfo": { + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "systemRamTotal": { + "description": "Total RAM on the device [in bytes] (Read-only)", + "format": "int64", + "type": "string" + }, + "tpmVersionInfo": { + "description": "Trusted Platform Module (TPM) (Read-only)", + "properties": { + "family": { + "description": "TPM family. We use the TPM 2.0 style encoding, e.g.: TPM 1.2: \"1.2\" -\u003e 312e3200 TPM 2.0: \"2.0\" -\u003e 322e3000", + "type": "string" + }, + "firmwareVersion": { + "description": "TPM firmware version.", + "type": "string" + }, + "manufacturer": { + "description": "TPM manufacturer code.", + "type": "string" + }, + "specLevel": { + "description": "TPM specification level. See Library Specification for TPM 2.0 and Main Specification for TPM 1.2.", + "type": "string" + }, + "tpmModel": { + "description": "TPM model number.", + "type": "string" + }, + "vendorSpecific": { + "description": "Vendor-specific information such as Vendor ID.", + "type": "string" + } + }, + "type": "object" + }, + "willAutoRenew": { + "description": "Determines if the device will auto renew its support after the support end date. 
This is a read-only property.", + "type": "boolean" + } + }, + "type": "object" + }, + "ChromeOsDeviceAction": { + "description": "Data about an update to the status of a Chrome OS device.", + "id": "ChromeOsDeviceAction", + "properties": { + "action": { + "annotations": { + "required": [ + "directory.chromeosdevices.action" + ] + }, + "description": "Action to be taken on the Chrome OS device.", + "type": "string" + }, + "deprovisionReason": { + "description": "Only used when the action is `deprovision`. With the `deprovision` action, this field is required. *Note*: The deprovision reason is audited because it might have implications on licenses for perpetual subscription customers.", + "type": "string" + } + }, + "type": "object" + }, + "ChromeOsDevices": { + "id": "ChromeOsDevices", + "properties": { + "chromeosdevices": { + "description": "A list of Chrome OS Device objects.", + "items": { + "$ref": "ChromeOsDevice" + }, + "type": "array" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#chromeosdevices", + "description": "Kind of resource this is.", + "type": "string" + }, + "nextPageToken": { + "description": "Token used to access the next page of this result. To access the next page, use this token's value in the `pageToken` query string of this request.", + "type": "string" + } + }, + "type": "object" + }, + "ChromeOsMoveDevicesToOu": { + "id": "ChromeOsMoveDevicesToOu", + "properties": { + "deviceIds": { + "annotations": { + "required": [ + "directory.chromeosdevices.moveDevicesToOu" + ] + }, + "description": "Chrome OS devices to be moved to an OU", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "CreatePrintServerRequest": { + "description": "Request for adding a new print server.", + "id": "CreatePrintServerRequest", + "properties": { + "parent": { + "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + "type": "string" + }, + "printServer": { + "$ref": "PrintServer", + "description": "Required. A print server to create. If you want to place the print server under a specific organizational unit (OU), then populate the `org_unit_id`. Otherwise the print server is created under the root OU. The `org_unit_id` can be retrieved using the [Directory API](https://developers.google.com/admin-sdk/directory/v1/guides/manage-org-units)." + } + }, + "type": "object" + }, + "CreatePrinterRequest": { + "description": "Request for adding a new printer.", + "id": "CreatePrinterRequest", + "properties": { + "parent": { + "description": "Required. The name of the customer. Format: customers/{customer_id}", + "type": "string" + }, + "printer": { + "$ref": "Printer", + "description": "Required. A printer to create. If you want to place the printer under a particular OU, then populate the printer.org_unit_id field. Otherwise the printer will be placed under the root OU." + } + }, + "type": "object" + }, + "Customer": { + "id": "Customer", + "properties": { + "alternateEmail": { + "description": "The customer's secondary contact email address. This email address cannot be on the same domain as the `customerDomain`.", + "type": "string" + }, + "customerCreationTime": { + "description": "The customer's creation time (Readonly)", + "format": "date-time", + "type": "string" + }, + "customerDomain": { + "description": "The customer's primary domain name string. 
Do not include the `www` prefix when creating a new customer.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "id": { + "description": "The unique ID for the customer's Google Workspace account. (Readonly)", + "type": "string" + }, + "kind": { + "default": "admin#directory#customer", + "description": "Identifies the resource as a customer. Value: `admin#directory#customer`", + "type": "string" + }, + "language": { + "description": "The customer's ISO 639-2 language code. See the [Language Codes](/admin-sdk/directory/v1/languages) page for the list of supported codes. Valid language codes outside the supported set will be accepted by the API but may lead to unexpected behavior. The default value is `en`.", + "type": "string" + }, + "phoneNumber": { + "description": "The customer's contact phone number in [E.164](https://en.wikipedia.org/wiki/E.164) format.", + "type": "string" + }, + "postalAddress": { + "$ref": "CustomerPostalAddress", + "description": "The customer's postal address information." + } + }, + "type": "object" + }, + "CustomerPostalAddress": { + "id": "CustomerPostalAddress", + "properties": { + "addressLine1": { + "description": "A customer's physical address. The address can be composed of one to three lines.", + "type": "string" + }, + "addressLine2": { + "description": "Address line 2 of the address.", + "type": "string" + }, + "addressLine3": { + "description": "Address line 3 of the address.", + "type": "string" + }, + "contactName": { + "description": "The customer contact's name.", + "type": "string" + }, + "countryCode": { + "description": "This is a required property. For `countryCode` information, see the [ISO 3166 country code elements](https://www.iso.org/iso/country_codes.htm).", + "type": "string" + }, + "locality": { + "description": "Name of the locality. An example of a locality value is the city of `San Francisco`.", + "type": "string" + }, + "organizationName": { + "description": "The company or company division name.", + "type": "string" + }, + "postalCode": { + "description": "The postal code. A postalCode example is a postal zip code such as `10009`. This is in accordance with http://portablecontacts.net/draft-spec.html#address_element.", + "type": "string" + }, + "region": { + "description": "Name of the region. An example of a region value is `NY` for the state of New York.", + "type": "string" + } + }, + "type": "object" + }, + "DirectoryChromeosdevicesCommand": { + "description": "Information regarding a command that was issued to a device.", + "id": "DirectoryChromeosdevicesCommand", + "properties": { + "commandExpireTime": { + "description": "The time at which the command will expire. If the device doesn't execute the command within this time, the command will expire.", + "format": "google-datetime", + "type": "string" + }, + "commandId": { + "description": "Unique ID of a device command.", + "format": "int64", + "type": "string" + }, + "commandResult": { + "$ref": "DirectoryChromeosdevicesCommandResult", + "description": "The result of the command execution." 
+ }, + "issueTime": { + "description": "The timestamp when the command was issued by the admin.", + "format": "google-datetime", + "type": "string" + }, + "payload": { + "description": "The payload that the command specified, if any.", + "type": "string" + }, + "state": { + "description": "Indicates the command state.", + "enum": [ + "STATE_UNSPECIFIED", + "PENDING", + "EXPIRED", + "CANCELLED", + "SENT_TO_CLIENT", + "ACKED_BY_CLIENT", + "EXECUTED_BY_CLIENT" + ], + "enumDescriptions": [ + "The command status was unspecified.", + "An unexpired command not yet sent to the client.", + "The command didn't get executed by the client within the expected time.", + "The command is cancelled by admin while in PENDING.", + "The command has been sent to the client.", + "The client has responded that it received the command.", + "The client has (un)successfully executed the command." + ], + "type": "string" + }, + "type": { + "description": "The type of the command.", + "enum": [ + "COMMAND_TYPE_UNSPECIFIED", + "REBOOT", + "TAKE_A_SCREENSHOT", + "SET_VOLUME", + "WIPE_USERS", + "REMOTE_POWERWASH", + "DEVICE_START_CRD_SESSION", + "CAPTURE_LOGS" + ], + "enumDescriptions": [ + "The command type was unspecified.", + "Reboot the device. Can only be issued to Kiosk and managed guest session devices.", + "Take a screenshot of the device. Only available if the device is in Kiosk Mode.", + "Set the volume of the device. Can only be issued to Kiosk and managed guest session devices.", + "Wipe all the users off of the device. Executing this command in the device will remove all user profile data, but it will keep device policy and enrollment.", + "Wipes the device by performing a power wash. Executing this command in the device will remove all data including user policies, device policies and enrollment policies. Warning: This will revert the device back to a factory state with no enrollment unless the device is subject to forced or auto enrollment. Use with caution, as this is an irreversible action!", + "Starts a Chrome Remote Desktop session.", + "Capture the system logs of a kiosk device. The logs can be downloaded from the downloadUrl link present in `deviceFiles` field of [chromeosdevices](https://developers.google.com/admin-sdk/directory/reference/rest/v1/chromeosdevices)" + ], + "type": "string" + } + }, + "type": "object" + }, + "DirectoryChromeosdevicesCommandResult": { + "description": "The result of executing a command.", + "id": "DirectoryChromeosdevicesCommandResult", + "properties": { + "commandResultPayload": { + "description": "The payload for the command result. The following commands respond with a payload: * `DEVICE_START_CRD_SESSION`: Payload is a stringified JSON object in the form: { \"url\": url }. The URL provides a link to the Chrome Remote Desktop session.", + "type": "string" + }, + "errorMessage": { + "description": "The error message with a short explanation as to why the command failed. Only present if the command failed.", + "type": "string" + }, + "executeTime": { + "description": "The time at which the command was executed or failed to execute.", + "format": "google-datetime", + "type": "string" + }, + "result": { + "description": "The result of the command.", + "enum": [ + "COMMAND_RESULT_TYPE_UNSPECIFIED", + "IGNORED", + "FAILURE", + "SUCCESS" + ], + "enumDescriptions": [ + "The command result was unspecified.", + "The command was ignored as obsolete.", + "The command could not be executed successfully.", + "The command was successfully executed." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "DirectoryChromeosdevicesIssueCommandRequest": { + "description": "A request for issuing a command.", + "id": "DirectoryChromeosdevicesIssueCommandRequest", + "properties": { + "commandType": { + "description": "The type of command.", + "enum": [ + "COMMAND_TYPE_UNSPECIFIED", + "REBOOT", + "TAKE_A_SCREENSHOT", + "SET_VOLUME", + "WIPE_USERS", + "REMOTE_POWERWASH", + "DEVICE_START_CRD_SESSION", + "CAPTURE_LOGS" + ], + "enumDescriptions": [ + "The command type was unspecified.", + "Reboot the device. Can only be issued to Kiosk and managed guest session devices.", + "Take a screenshot of the device. Only available if the device is in Kiosk Mode.", + "Set the volume of the device. Can only be issued to Kiosk and managed guest session devices.", + "Wipe all the users off of the device. Executing this command in the device will remove all user profile data, but it will keep device policy and enrollment.", + "Wipes the device by performing a power wash. Executing this command in the device will remove all data including user policies, device policies and enrollment policies. Warning: This will revert the device back to a factory state with no enrollment unless the device is subject to forced or auto enrollment. Use with caution, as this is an irreversible action!", + "Starts a Chrome Remote Desktop session.", + "Capture the system logs of a kiosk device. The logs can be downloaded from the downloadUrl link present in `deviceFiles` field of [chromeosdevices](https://developers.google.com/admin-sdk/directory/reference/rest/v1/chromeosdevices)" + ], + "type": "string" + }, + "payload": { + "description": "The payload for the command, provide it only if command supports it. The following commands support adding payload: * `SET_VOLUME`: Payload is a stringified JSON object in the form: { \"volume\": 50 }. The volume has to be an integer in the range [0,100]. * `DEVICE_START_CRD_SESSION`: Payload is optionally a stringified JSON object in the form: { \"ackedUserPresence\": true }. `ackedUserPresence` is a boolean. By default, `ackedUserPresence` is set to `false`. To start a Chrome Remote Desktop session for an active device, set `ackedUserPresence` to `true`.", + "type": "string" + } + }, + "type": "object" + }, + "DirectoryChromeosdevicesIssueCommandResponse": { + "description": "A response for issuing a command.", + "id": "DirectoryChromeosdevicesIssueCommandResponse", + "properties": { + "commandId": { + "description": "The unique ID of the issued command, used to retrieve the command status.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "DomainAlias": { + "id": "DomainAlias", + "properties": { + "creationTime": { + "description": "The creation time of the domain alias. (Read-only).", + "format": "int64", + "type": "string" + }, + "domainAliasName": { + "description": "The domain alias name.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#domainAlias", + "description": "Kind of resource this is.", + "type": "string" + }, + "parentDomainName": { + "annotations": { + "required": [ + "directory.domains.insert" + ] + }, + "description": "The parent domain name that the domain alias is associated with. This can either be a primary or secondary domain name within a customer.", + "type": "string" + }, + "verified": { + "description": "Indicates the verification state of a domain alias. 
(Read-only)", + "type": "boolean" + } + }, + "type": "object" + }, + "DomainAliases": { + "id": "DomainAliases", + "properties": { + "domainAliases": { + "description": "A list of domain alias objects.", + "items": { + "$ref": "DomainAlias" + }, + "type": "array" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#domainAliases", + "description": "Kind of resource this is.", + "type": "string" + } + }, + "type": "object" + }, + "Domains": { + "id": "Domains", + "properties": { + "creationTime": { + "description": "Creation time of the domain. Expressed in [Unix time](https://en.wikipedia.org/wiki/Epoch_time) format. (Read-only).", + "format": "int64", + "type": "string" + }, + "domainAliases": { + "description": "A list of domain alias objects. (Read-only)", + "items": { + "$ref": "DomainAlias" + }, + "type": "array" + }, + "domainName": { + "annotations": { + "required": [ + "directory.domains.insert" + ] + }, + "description": "The domain name of the customer.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "isPrimary": { + "description": "Indicates if the domain is a primary domain (Read-only).", + "type": "boolean" + }, + "kind": { + "default": "admin#directory#domain", + "description": "Kind of resource this is.", + "type": "string" + }, + "verified": { + "description": "Indicates the verification state of a domain. (Read-only).", + "type": "boolean" + } + }, + "type": "object" + }, + "Domains2": { + "id": "Domains2", + "properties": { + "domains": { + "description": "A list of domain objects.", + "items": { + "$ref": "Domains" + }, + "type": "array" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#domains", + "description": "Kind of resource this is.", + "type": "string" + } + }, + "type": "object" + }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", + "id": "Empty", + "properties": {}, + "type": "object" + }, + "FailureInfo": { + "description": "Info about failures", + "id": "FailureInfo", + "properties": { + "errorCode": { + "description": "Canonical code for why the update failed to apply.", + "enum": [ + "OK", + "CANCELLED", + "UNKNOWN", + "INVALID_ARGUMENT", + "DEADLINE_EXCEEDED", + "NOT_FOUND", + "ALREADY_EXISTS", + "PERMISSION_DENIED", + "UNAUTHENTICATED", + "RESOURCE_EXHAUSTED", + "FAILED_PRECONDITION", + "ABORTED", + "OUT_OF_RANGE", + "UNIMPLEMENTED", + "INTERNAL", + "UNAVAILABLE", + "DATA_LOSS" + ], + "enumDescriptions": [ + "Not an error; returned on success. HTTP Mapping: 200 OK", + "The operation was cancelled, typically by the caller. HTTP Mapping: 499 Client Closed Request", + "Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. HTTP Mapping: 500 Internal Server Error", + "The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. 
`INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). HTTP Mapping: 400 Bad Request", + "The deadline expired before the operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. HTTP Mapping: 504 Gateway Timeout", + "Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented allowlist, `NOT_FOUND` may be used. If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 Not Found", + "The entity that a client attempted to create (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict", + "The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. HTTP Mapping: 403 Forbidden", + "The request does not have valid authentication credentials for the operation. HTTP Mapping: 401 Unauthorized", + "Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. HTTP Mapping: 429 Too Many Requests", + "The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level. For example, when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence. (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. For example, if an \"rmdir\" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. HTTP Mapping: 400 Bad Request", + "The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 409 Conflict", + "The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. 
We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. HTTP Mapping: 400 Bad Request", + "The operation is not implemented or is not supported/enabled in this service. HTTP Mapping: 501 Not Implemented", + "Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. HTTP Mapping: 500 Internal Server Error", + "The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. Note that it is not always safe to retry non-idempotent operations. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 503 Service Unavailable", + "Unrecoverable data loss or corruption. HTTP Mapping: 500 Internal Server Error" + ], + "type": "string" + }, + "errorMessage": { + "description": "Failure reason message.", + "type": "string" + }, + "printer": { + "$ref": "Printer", + "description": "Failed printer." + }, + "printerId": { + "description": "Id of a failed printer.", + "type": "string" + } + }, + "type": "object" + }, + "Feature": { + "description": "JSON template for Feature object in Directory API.", + "id": "Feature", + "properties": { + "etags": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#resources#features#Feature", + "description": "Kind of resource this is.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "directory.resources.features.insert" + ] + }, + "description": "The name of the feature.", + "type": "string" + } + }, + "type": "object" + }, + "FeatureInstance": { + "description": "JSON template for a feature instance.", + "id": "FeatureInstance", + "properties": { + "feature": { + "$ref": "Feature", + "description": "The feature that this is an instance of. A calendar resource may have multiple instances of a feature." + } + }, + "type": "object" + }, + "FeatureRename": { + "id": "FeatureRename", + "properties": { + "newName": { + "annotations": { + "required": [ + "directory.resources.features.rename" + ] + }, + "description": "New name of the feature.", + "type": "string" + } + }, + "type": "object" + }, + "Features": { + "description": "Public API: Resources.features", + "id": "Features", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "features": { + "description": "The Features in this page of results.", + "items": { + "$ref": "Feature" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#resources#features#featuresList", + "description": "Kind of resource this is.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Group": { + "description": "Google Groups provide your users the ability to send messages to groups of people using the group's email address. For more information about common tasks, see the [Developer's Guide](https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups). For information about other types of groups, see the [Cloud Identity Groups API documentation](https://cloud.google.com/identity/docs/groups). 
Note: The user calling the API (or being impersonated by a service account) must have an assigned [role](https://developers.google.com/admin-sdk/directory/v1/guides/manage-roles) that includes Admin API Groups permissions, such as Super Admin or Groups Admin.", + "id": "Group", + "properties": { + "adminCreated": { + "description": "Read-only. Value is `true` if this group was created by an administrator rather than a user.", + "type": "boolean" + }, + "aliases": { + "description": "Read-only. The list of a group's alias email addresses. To add, update, or remove a group's aliases, use the `groups.aliases` methods. If edited in a group's POST or PUT request, the edit is ignored.", + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "description": "An extended description to help users determine the purpose of a group. For example, you can include information about who should join the group, the types of messages to send to the group, links to FAQs about the group, or related groups. Maximum length is `4,096` characters.", + "type": "string" + }, + "directMembersCount": { + "description": "The number of users that are direct members of the group. If a group is a member (child) of this group (the parent), members of the child group are not counted in the `directMembersCount` property of the parent group.", + "format": "int64", + "type": "string" + }, + "email": { + "annotations": { + "required": [ + "directory.groups.insert" + ] + }, + "description": "The group's email address. If your account has multiple domains, select the appropriate domain for the email address. The `email` must be unique. This property is required when creating a group. Group email addresses are subject to the same character usage rules as usernames, see the [help center](https://support.google.com/a/answer/9193374) for details.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "id": { + "description": "Read-only. The unique ID of a group. A group `id` can be used as a group request URI's `groupKey`.", + "type": "string" + }, + "kind": { + "default": "admin#directory#group", + "description": "The type of the API resource. For Groups resources, the value is `admin#directory#group`.", + "type": "string" + }, + "name": { + "description": "The group's display name.", + "type": "string" + }, + "nonEditableAliases": { + "description": "Read-only. The list of the group's non-editable alias email addresses that are outside of the account's primary domain or subdomains. These are functioning email addresses used by the group. This is a read-only property returned in the API's response for a group. If edited in a group's POST or PUT request, the edit is ignored.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GroupAlias": { + "description": "The Directory API manages aliases, which are alternative email addresses.", + "id": "GroupAlias", + "properties": { + "alias": { + "description": "The alias email address.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "id": { + "description": "The unique ID of the group.", + "type": "string" + }, + "kind": { + "description": "The type of the API resource. 
For Alias resources, the value is `admin#directory#alias`.", + "type": "string" + }, + "primaryEmail": { + "description": "The primary email address of the group.", + "type": "string" + } + }, + "type": "object" + }, + "Groups": { + "id": "Groups", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "groups": { + "description": "A list of group objects.", + "items": { + "$ref": "Group" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#groups", + "description": "Kind of resource this is.", + "type": "string" + }, + "nextPageToken": { + "description": "Token used to access next page of this result.", + "type": "string" + } + }, + "type": "object" + }, + "ListPrintServersResponse": { + "id": "ListPrintServersResponse", + "properties": { + "nextPageToken": { + "description": "A token that can be sent as `page_token` in a request to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "printServers": { + "description": "List of print servers.", + "items": { + "$ref": "PrintServer" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListPrinterModelsResponse": { + "description": "Response for listing allowed printer models.", + "id": "ListPrinterModelsResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "printerModels": { + "description": "Printer models that are currently allowed to be configured for ChromeOS. Some printers may be added or removed over time.", + "items": { + "$ref": "PrinterModel" + }, + "type": "array" + } + }, + "type": "object" + }, + "ListPrintersResponse": { + "description": "Response for listing printers.", + "id": "ListPrintersResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "printers": { + "description": "List of printers. If `org_unit_id` was given in the request, then only printers visible for this OU will be returned. If `org_unit_id` was not given in the request, then all printers will be returned.", + "items": { + "$ref": "Printer" + }, + "type": "array" + } + }, + "type": "object" + }, + "Member": { + "description": "A Google Groups member can be a user or another group. This member can be inside or outside of your account's domains. For more information about common group member tasks, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-group-members).", + "id": "Member", + "properties": { + "delivery_settings": { + "description": "Defines mail delivery preferences of the member. This field is only supported by `insert`, `update`, and `get` methods.", + "type": "string" + }, + "email": { + "description": "The member's email address. A member can be a user or another group. This property is required when adding a member to a group. The `email` must be unique and cannot be an alias of another group. If the email address is changed, the API automatically reflects the email address changes.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "id": { + "description": "The unique ID of the group member. 
A member `id` can be used as a member request URI's `memberKey`.", + "type": "string" + }, + "kind": { + "default": "admin#directory#member", + "description": "The type of the API resource. For Members resources, the value is `admin#directory#member`.", + "type": "string" + }, + "role": { + "description": "The member's role in a group. The API returns an error for cycles in group memberships. For example, if `group1` is a member of `group2`, `group2` cannot be a member of `group1`. For more information about a member's role, see the [administration help center](https://support.google.com/a/answer/167094).", + "type": "string" + }, + "status": { + "description": "Status of member (Immutable)", + "type": "string" + }, + "type": { + "description": "The type of group member.", + "type": "string" + } + }, + "type": "object" + }, + "Members": { + "id": "Members", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#members", + "description": "Kind of resource this is.", + "type": "string" + }, + "members": { + "description": "A list of member objects.", + "items": { + "$ref": "Member" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token used to access next page of this result.", + "type": "string" + } + }, + "type": "object" + }, + "MembersHasMember": { + "description": "JSON template for Has Member response in Directory API.", + "id": "MembersHasMember", + "properties": { + "isMember": { + "description": "Output only. Identifies whether the given user is a member of the group. Membership can be direct or nested.", + "readOnly": true, + "type": "boolean" + } + }, + "type": "object" + }, + "MobileDevice": { + "description": "Google Workspace Mobile Management includes Android, [Google Sync](https://support.google.com/a/answer/135937), and iOS devices. For more information about common group mobile device API tasks, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-mobile-devices.html).", + "id": "MobileDevice", + "properties": { + "adbStatus": { + "description": "Adb (USB debugging) enabled or disabled on device (Read-only)", + "type": "boolean" + }, + "applications": { + "description": "The list of applications installed on an Android mobile device. It is not applicable to Google Sync and iOS devices. The list includes any Android applications that access Google Workspace data. When updating an applications list, it is important to note that updates replace the existing list. If the Android device has two existing applications and the API updates the list with five applications, the list is now the updated list of five applications.", + "items": { + "properties": { + "displayName": { + "description": "The application's display name. An example is `Browser`.", + "type": "string" + }, + "packageName": { + "description": "The application's package name. An example is `com.android.browser`.", + "type": "string" + }, + "permission": { + "description": "The list of permissions of this application. These can be either a standard Android permission or one defined by the application, and are found in an application's [Android manifest](https://developer.android.com/guide/topics/manifest/uses-permission-element.html). Examples of a Calendar application's permissions are `READ_CALENDAR`, or `MANAGE_ACCOUNTS`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "versionCode": { + "description": "The application's version code. 
An example is `13`.", + "format": "int32", + "type": "integer" + }, + "versionName": { + "description": "The application's version name. An example is `3.2-140714`.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "basebandVersion": { + "description": "The device's baseband version.", + "type": "string" + }, + "bootloaderVersion": { + "description": "Mobile Device Bootloader version (Read-only)", + "type": "string" + }, + "brand": { + "description": "Mobile Device Brand (Read-only)", + "type": "string" + }, + "buildNumber": { + "description": "The device's operating system build number.", + "type": "string" + }, + "defaultLanguage": { + "description": "The default locale used on the device.", + "type": "string" + }, + "developerOptionsStatus": { + "description": "Developer options enabled or disabled on device (Read-only)", + "type": "boolean" + }, + "deviceCompromisedStatus": { + "description": "The compromised device status.", + "type": "string" + }, + "deviceId": { + "description": "The serial number for a Google Sync mobile device. For Android and iOS devices, this is a software generated unique identifier.", + "type": "string" + }, + "devicePasswordStatus": { + "description": "DevicePasswordStatus (Read-only)", + "type": "string" + }, + "email": { + "description": "The list of the owner's email addresses. If your application needs the current list of user emails, use the [get](/admin-sdk/directory/v1/reference/mobiledevices/get.html) method. For additional information, see the [retrieve a user](/admin-sdk/directory/v1/guides/manage-users#get_user) method.", + "items": { + "type": "string" + }, + "type": "array" + }, + "encryptionStatus": { + "description": "Mobile Device Encryption Status (Read-only)", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "firstSync": { + "description": "Date and time the device was first synchronized with the policy settings in the G Suite administrator control panel (Read-only)", + "format": "date-time", + "type": "string" + }, + "hardware": { + "description": "Mobile Device Hardware (Read-only)", + "type": "string" + }, + "hardwareId": { + "description": "The IMEI/MEID unique identifier for Android hardware. It is not applicable to Google Sync devices. When adding an Android mobile device, this is an optional property. When updating one of these devices, this is a read-only property.", + "type": "string" + }, + "imei": { + "description": "The device's IMEI number.", + "type": "string" + }, + "kernelVersion": { + "description": "The device's kernel version.", + "type": "string" + }, + "kind": { + "default": "admin#directory#mobiledevice", + "description": "The type of the API resource. For Mobiledevices resources, the value is `admin#directory#mobiledevice`.", + "type": "string" + }, + "lastSync": { + "description": "Date and time the device was last synchronized with the policy settings in the G Suite administrator control panel (Read-only)", + "format": "date-time", + "type": "string" + }, + "managedAccountIsOnOwnerProfile": { + "description": "Boolean indicating if this account is on owner/primary profile or not.", + "type": "boolean" + }, + "manufacturer": { + "description": "Mobile Device manufacturer (Read-only)", + "type": "string" + }, + "meid": { + "description": "The device's MEID number.", + "type": "string" + }, + "model": { + "description": "The mobile device's model name, for example Nexus S. 
This property can be [updated](/admin-sdk/directory/v1/reference/mobiledevices/update.html). For more information, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_device).", + "type": "string" + }, + "name": { + "description": "The list of the owner's user names. If your application needs the current list of device owner names, use the [get](/admin-sdk/directory/v1/reference/mobiledevices/get.html) method. For more information about retrieving mobile device user information, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-users#get_user).", + "items": { + "type": "string" + }, + "type": "array" + }, + "networkOperator": { + "description": "Mobile Device mobile or network operator (if available) (Read-only)", + "type": "string" + }, + "os": { + "description": "The mobile device's operating system, for example IOS 4.3 or Android 2.3.5. This property can be [updated](/admin-sdk/directory/v1/reference/mobiledevices/update.html). For more information, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_device).", + "type": "string" + }, + "otherAccountsInfo": { + "description": "The list of accounts added on device (Read-only)", + "items": { + "type": "string" + }, + "type": "array" + }, + "privilege": { + "description": "DMAgentPermission (Read-only)", + "type": "string" + }, + "releaseVersion": { + "description": "Mobile Device release version (Read-only)", + "type": "string" + }, + "resourceId": { + "description": "The unique ID the API service uses to identify the mobile device.", + "type": "string" + }, + "securityPatchLevel": { + "description": "Mobile Device Security patch level (Read-only)", + "format": "int64", + "type": "string" + }, + "serialNumber": { + "description": "The device's serial number.", + "type": "string" + }, + "status": { + "description": "The device's status.", + "type": "string" + }, + "supportsWorkProfile": { + "description": "Work profile supported on device (Read-only)", + "type": "boolean" + }, + "type": { + "description": "The type of mobile device.", + "type": "string" + }, + "unknownSourcesStatus": { + "description": "Unknown sources enabled or disabled on device (Read-only)", + "type": "boolean" + }, + "userAgent": { + "description": "Gives information about the device such as `os` version. This property can be [updated](/admin-sdk/directory/v1/reference/mobiledevices/update.html). 
For more information, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_device).", + "type": "string" + }, + "wifiMacAddress": { + "description": "The device's MAC address on Wi-Fi networks.", + "type": "string" + } + }, + "type": "object" + }, + "MobileDeviceAction": { + "id": "MobileDeviceAction", + "properties": { + "action": { + "annotations": { + "required": [ + "directory.mobiledevices.action" + ] + }, + "description": "The action to be performed on the device.", + "type": "string" + } + }, + "type": "object" + }, + "MobileDevices": { + "id": "MobileDevices", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#mobiledevices", + "description": "Kind of resource this is.", + "type": "string" + }, + "mobiledevices": { + "description": "A list of Mobile Device objects.", + "items": { + "$ref": "MobileDevice" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token used to access next page of this result.", + "type": "string" + } + }, + "type": "object" + }, + "OrgUnit": { + "description": "Managing your account's organizational units allows you to configure your users' access to services and custom settings. For more information about common organizational unit tasks, see the [Developer's Guide](/admin-sdk/directory/v1/guides/manage-org-units.html). The customer's organizational unit hierarchy is limited to 35 levels of depth.", + "id": "OrgUnit", + "properties": { + "blockInheritance": { + "description": "Determines if a sub-organizational unit can inherit the settings of the parent organization. The default value is `false`, meaning a sub-organizational unit inherits the settings of the nearest parent organizational unit. We recommend using the default value because setting `block_inheritance` to `true` can have _unintended consequences_. For more information about inheritance and users in an organization structure, see the [administration help center](https://support.google.com/a/answer/4352075).", + "type": "boolean" + }, + "description": { + "description": "Description of the organizational unit.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#orgUnit", + "description": "The type of the API resource. For Orgunits resources, the value is `admin#directory#orgUnit`.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "directory.orgunits.insert" + ] + }, + "description": "The organizational unit's path name. For example, an organizational unit's name within the /corp/support/sales_support parent path is sales_support. Required.", + "type": "string" + }, + "orgUnitId": { + "description": "The unique ID of the organizational unit.", + "type": "string" + }, + "orgUnitPath": { + "description": "The full path to the organizational unit. The `orgUnitPath` is a derived property. When listed, it is derived from `parentOrgunitPath` and organizational unit's `name`. For example, for an organizational unit named 'apps' under parent organization '/engineering', the orgUnitPath is '/engineering/apps'. In order to edit an `orgUnitPath`, either update the name of the organization or the `parentOrgunitPath`. A user's organizational unit determines which Google Workspace services the user has access to. If the user is moved to a new organization, the user's access changes. 
For more information about organization structures, see the [administration help center](https://support.google.com/a/answer/4352075). For more information about moving a user to a different organization, see [Update a user](/admin-sdk/directory/v1/guides/manage-users.html#update_user).", + "type": "string" + }, + "parentOrgUnitId": { + "description": "The unique ID of the parent organizational unit. Required, unless `parentOrgUnitPath` is set.", + "type": "string" + }, + "parentOrgUnitPath": { + "description": "The organizational unit's parent path. For example, /corp/sales is the parent path for the /corp/sales/sales_support organizational unit. Required, unless `parentOrgUnitId` is set.", + "type": "string" + } + }, + "type": "object" + }, + "OrgUnits": { + "id": "OrgUnits", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#orgUnits", + "description": "The type of the API resource. For Org Unit resources, the type is `admin#directory#orgUnits`.", + "type": "string" + }, + "organizationUnits": { + "description": "A list of organizational unit objects.", + "items": { + "$ref": "OrgUnit" + }, + "type": "array" + } + }, + "type": "object" + }, + "OsUpdateStatus": { + "description": "Contains information regarding the current OS update status.", + "id": "OsUpdateStatus", + "properties": { + "rebootTime": { + "description": "Date and time of the last reboot.", + "type": "string" + }, + "state": { + "description": "The update state of an OS update.", + "enum": [ + "updateStateUnspecified", + "updateStateNotStarted", + "updateStateDownloadInProgress", + "updateStateNeedReboot" + ], + "enumDescriptions": [ + "The update state is unspecified.", + "There is an update pending but it hasn't started.", + "The pending update is being downloaded.", + "The device is ready to install the update, but must reboot." + ], + "type": "string" + }, + "targetKioskAppVersion": { + "description": "New required platform version from the pending updated kiosk app.", + "type": "string" + }, + "targetOsVersion": { + "description": "New platform version of the OS image being downloaded and applied. It is only set when update status is UPDATE_STATUS_DOWNLOAD_IN_PROGRESS or UPDATE_STATUS_NEED_REBOOT. Note this could be a dummy \"0.0.0.0\" for UPDATE_STATUS_NEED_REBOOT for some edge cases, e.g. update engine is restarted without a reboot.", + "type": "string" + }, + "updateCheckTime": { + "description": "Date and time of the last update check.", + "type": "string" + }, + "updateTime": { + "description": "Date and time of the last successful OS update.", + "type": "string" + } + }, + "type": "object" + }, + "PrintServer": { + "description": "Configuration for a print server.", + "id": "PrintServer", + "properties": { + "createTime": { + "description": "Output only. Time when the print server was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Editable. Description of the print server (as shown in the Admin console).", + "type": "string" + }, + "displayName": { + "description": "Editable. Display name of the print server (as shown in the Admin console).", + "type": "string" + }, + "id": { + "description": "Immutable. ID of the print server. Leave empty when creating.", + "type": "string" + }, + "name": { + "description": "Immutable. Resource name of the print server. Leave empty when creating. 
Format: `customers/{customer.id}/printServers/{print_server.id}`", + "type": "string" + }, + "orgUnitId": { + "description": "ID of the organization unit (OU) that owns this print server. This value can only be set when the print server is initially created. If it's not populated, the print server is placed under the root OU. The `org_unit_id` can be retrieved using the [Directory API](/admin-sdk/directory/reference/rest/v1/orgunits).", + "type": "string" + }, + "uri": { + "description": "Editable. Print server URI.", + "type": "string" + } + }, + "type": "object" + }, + "PrintServerFailureInfo": { + "description": "Info about failures", + "id": "PrintServerFailureInfo", + "properties": { + "errorCode": { + "description": "Canonical code for why the update failed to apply.", + "enum": [ + "OK", + "CANCELLED", + "UNKNOWN", + "INVALID_ARGUMENT", + "DEADLINE_EXCEEDED", + "NOT_FOUND", + "ALREADY_EXISTS", + "PERMISSION_DENIED", + "UNAUTHENTICATED", + "RESOURCE_EXHAUSTED", + "FAILED_PRECONDITION", + "ABORTED", + "OUT_OF_RANGE", + "UNIMPLEMENTED", + "INTERNAL", + "UNAVAILABLE", + "DATA_LOSS" + ], + "enumDescriptions": [ + "Not an error; returned on success. HTTP Mapping: 200 OK", + "The operation was cancelled, typically by the caller. HTTP Mapping: 499 Client Closed Request", + "Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. HTTP Mapping: 500 Internal Server Error", + "The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). HTTP Mapping: 400 Bad Request", + "The deadline expired before the operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. HTTP Mapping: 504 Gateway Timeout", + "Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented allowlist, `NOT_FOUND` may be used. If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 Not Found", + "The entity that a client attempted to create (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict", + "The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. HTTP Mapping: 403 Forbidden", + "The request does not have valid authentication credentials for the operation. HTTP Mapping: 401 Unauthorized", + "Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. 
HTTP Mapping: 429 Too Many Requests", + "The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level. For example, when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence. (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. For example, if an \"rmdir\" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. HTTP Mapping: 400 Bad Request", + "The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 409 Conflict", + "The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. HTTP Mapping: 400 Bad Request", + "The operation is not implemented or is not supported/enabled in this service. HTTP Mapping: 501 Not Implemented", + "Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. HTTP Mapping: 500 Internal Server Error", + "The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. Note that it is not always safe to retry non-idempotent operations. See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: 503 Service Unavailable", + "Unrecoverable data loss or corruption. HTTP Mapping: 500 Internal Server Error" + ], + "type": "string" + }, + "errorMessage": { + "description": "Failure reason message.", + "type": "string" + }, + "printServer": { + "$ref": "PrintServer", + "description": "Failed print server." + }, + "printServerId": { + "description": "ID of a failed print server.", + "type": "string" + } + }, + "type": "object" + }, + "Printer": { + "description": "Printer configuration.", + "id": "Printer", + "properties": { + "auxiliaryMessages": { + "description": "Output only. Auxiliary messages about issues with the printer configuration if any.", + "items": { + "$ref": "AuxiliaryMessage" + }, + "readOnly": true, + "type": "array" + }, + "createTime": { + "description": "Output only. Time when printer was created.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "description": { + "description": "Editable. 
Description of printer.", + "type": "string" + }, + "displayName": { + "description": "Editable. Name of printer.", + "type": "string" + }, + "id": { + "description": "Id of the printer. (During printer creation leave empty)", + "type": "string" + }, + "makeAndModel": { + "description": "Editable. Make and model of printer, e.g. Lexmark MS610de. Value must be in the format seen in the ListPrinterModels response.", + "type": "string" + }, + "name": { + "description": "The resource name of the Printer object, in the format customers/{customer-id}/printers/{printer-id} (During printer creation leave empty)", + "type": "string" + }, + "orgUnitId": { + "description": "Organization Unit that owns this printer (Only can be set during Printer creation)", + "type": "string" + }, + "uri": { + "description": "Editable. Printer URI.", + "type": "string" + }, + "useDriverlessConfig": { + "description": "Editable. Flag to use driverless configuration or not. If it's set to true, make_and_model can be ignored", + "type": "boolean" + } + }, + "type": "object" + }, + "PrinterModel": { + "description": "Printer manufacturer and model", + "id": "PrinterModel", + "properties": { + "displayName": { + "description": "Display name, e.g. \"Brother MFC-8840D\"", + "type": "string" + }, + "makeAndModel": { + "description": "Make and model as represented in the \"make_and_model\" field in the Printer object, e.g. \"brother mfc-8840d\"", + "type": "string" + }, + "manufacturer": { + "description": "Manufacturer, e.g. \"Brother\"", + "type": "string" + } + }, + "type": "object" + }, + "Privilege": { + "id": "Privilege", + "properties": { + "childPrivileges": { + "description": "A list of child privileges. Privileges for a service form a tree. Each privilege can have a list of child privileges; this list is empty for a leaf privilege.", + "items": { + "$ref": "Privilege" + }, + "type": "array" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "isOuScopable": { + "description": "If the privilege can be restricted to an organization unit.", + "type": "boolean" + }, + "kind": { + "default": "admin#directory#privilege", + "description": "The type of the API resource. This is always `admin#directory#privilege`.", + "type": "string" + }, + "privilegeName": { + "description": "The name of the privilege.", + "type": "string" + }, + "serviceId": { + "description": "The obfuscated ID of the service this privilege is for. This value is returned with [`Privileges.list()`](/admin-sdk/directory/v1/reference/privileges/list).", + "type": "string" + }, + "serviceName": { + "description": "The name of the service this privilege is for.", + "type": "string" + } + }, + "type": "object" + }, + "Privileges": { + "id": "Privileges", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of Privilege resources.", + "items": { + "$ref": "Privilege" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#privileges", + "description": "The type of the API resource. 
This is always `admin#directory#privileges`.", + "type": "string" + } + }, + "type": "object" + }, + "Role": { + "id": "Role", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "isSuperAdminRole": { + "description": "Returns `true` if the role is a super admin role.", + "type": "boolean" + }, + "isSystemRole": { + "description": "Returns `true` if this is a pre-defined system role.", + "type": "boolean" + }, + "kind": { + "default": "admin#directory#role", + "description": "The type of the API resource. This is always `admin#directory#role`.", + "type": "string" + }, + "roleDescription": { + "description": "A short description of the role.", + "type": "string" + }, + "roleId": { + "description": "ID of the role.", + "format": "int64", + "type": "string" + }, + "roleName": { + "annotations": { + "required": [ + "directory.roles.insert" + ] + }, + "description": "Name of the role.", + "type": "string" + }, + "rolePrivileges": { + "annotations": { + "required": [ + "directory.roles.insert" + ] + }, + "description": "The set of privileges that are granted to this role.", + "items": { + "properties": { + "privilegeName": { + "description": "The name of the privilege.", + "type": "string" + }, + "serviceId": { + "description": "The obfuscated ID of the service this privilege is for. This value is returned with [`Privileges.list()`](/admin-sdk/directory/v1/reference/privileges/list).", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "RoleAssignment": { + "description": "Defines an assignment of a role.", + "id": "RoleAssignment", + "properties": { + "assignedTo": { + "description": "The unique ID of the entity this role is assigned to—either the `user_id` of a user, the `group_id` of a group, or the `uniqueId` of a service account as defined in [Identity and Access Management (IAM)](https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts).", + "type": "string" + }, + "assigneeType": { + "description": "Output only. The type of the assignee (`USER` or `GROUP`).", + "enum": [ + "user", + "group" + ], + "enumDescriptions": [ + "An individual user within the domain.", + "A group within the domain." + ], + "readOnly": true, + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#roleAssignment", + "description": "The type of the API resource. This is always `admin#directory#roleAssignment`.", + "type": "string" + }, + "orgUnitId": { + "description": "If the role is restricted to an organization unit, this contains the ID for the organization unit the exercise of this role is restricted to.", + "type": "string" + }, + "roleAssignmentId": { + "description": "ID of this roleAssignment.", + "format": "int64", + "type": "string" + }, + "roleId": { + "description": "The ID of the role that is assigned.", + "format": "int64", + "type": "string" + }, + "scopeType": { + "description": "The scope in which this role is assigned.", + "type": "string" + } + }, + "type": "object" + }, + "RoleAssignments": { + "id": "RoleAssignments", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of RoleAssignment resources.", + "items": { + "$ref": "RoleAssignment" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#roleAssignments", + "description": "The type of the API resource. 
This is always `admin#directory#roleAssignments`.", + "type": "string" + }, + "nextPageToken": { + "type": "string" + } + }, + "type": "object" + }, + "Roles": { + "id": "Roles", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of Role resources.", + "items": { + "$ref": "Role" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#roles", + "description": "The type of the API resource. This is always `admin#directory#roles`.", + "type": "string" + }, + "nextPageToken": { + "type": "string" + } + }, + "type": "object" + }, + "Schema": { + "description": "The type of API resource. For Schema resources, this is always `admin#directory#schema`.", + "id": "Schema", + "properties": { + "displayName": { + "annotations": { + "required": [ + "directory.schemas.insert" + ] + }, + "description": "Display name for the schema.", + "type": "string" + }, + "etag": { + "description": "The ETag of the resource.", + "type": "string" + }, + "fields": { + "annotations": { + "required": [ + "directory.schemas.insert", + "directory.schemas.update" + ] + }, + "description": "A list of fields in the schema.", + "items": { + "$ref": "SchemaFieldSpec" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#schema", + "description": "Kind of resource this is.", + "type": "string" + }, + "schemaId": { + "description": "The unique identifier of the schema (Read-only)", + "type": "string" + }, + "schemaName": { + "annotations": { + "required": [ + "directory.schemas.insert" + ] + }, + "description": "The schema's name. Each `schema_name` must be unique within a customer. Reusing a name results in a `409: Entity already exists` error.", + "type": "string" + } + }, + "type": "object" + }, + "SchemaFieldSpec": { + "description": "You can use schemas to add custom fields to user profiles. You can use these fields to store information such as the projects your users work on, their physical locations, their hire dates, or whatever else fits your business needs. For more information, see [Custom User Fields](/admin-sdk/directory/v1/guides/manage-schemas).", + "id": "SchemaFieldSpec", + "properties": { + "displayName": { + "annotations": { + "required": [ + "directory.schemas.insert", + "directory.schemas.update" + ] + }, + "description": "Display Name of the field.", + "type": "string" + }, + "etag": { + "description": "The ETag of the field.", + "type": "string" + }, + "fieldId": { + "description": "The unique identifier of the field (Read-only)", + "type": "string" + }, + "fieldName": { + "annotations": { + "required": [ + "directory.schemas.insert", + "directory.schemas.update" + ] + }, + "description": "The name of the field.", + "type": "string" + }, + "fieldType": { + "annotations": { + "required": [ + "directory.schemas.insert", + "directory.schemas.update" + ] + }, + "description": "The type of the field.", + "type": "string" + }, + "indexed": { + "default": "true", + "description": "Boolean specifying whether the field is indexed or not. Default: `true`.", + "type": "boolean" + }, + "kind": { + "default": "admin#directory#schema#fieldspec", + "description": "The kind of resource this is. For schema fields this is always `admin#directory#schema#fieldspec`.", + "type": "string" + }, + "multiValued": { + "description": "A boolean specifying whether this is a multi-valued field or not. Default: `false`.", + "type": "boolean" + }, + "numericIndexingSpec": { + "description": "Indexing spec for a numeric field. 
By default, only exact match queries will be supported for numeric fields. Setting the `numericIndexingSpec` allows range queries to be supported.", + "properties": { + "maxValue": { + "description": "Maximum value of this field. This is meant to be indicative rather than enforced. Values outside this range will still be indexed, but search may not be as performant.", + "format": "double", + "type": "number" + }, + "minValue": { + "description": "Minimum value of this field. This is meant to be indicative rather than enforced. Values outside this range will still be indexed, but search may not be as performant.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "readAccessType": { + "default": "ALL_DOMAIN_USERS", + "description": "Specifies who can view values of this field. See [Retrieve users as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin) for more information. Note: It may take up to 24 hours for changes to this field to be reflected.", + "type": "string" + } + }, + "type": "object" + }, + "Schemas": { + "description": "JSON response template for List Schema operation in Directory API.", + "id": "Schemas", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#schemas", + "description": "Kind of resource this is.", + "type": "string" + }, + "schemas": { + "description": "A list of UserSchema objects.", + "items": { + "$ref": "Schema" + }, + "type": "array" + } + }, + "type": "object" + }, + "Token": { + "description": "JSON template for token resource in Directory API.", + "id": "Token", + "properties": { + "anonymous": { + "description": "Whether the application is registered with Google. The value is `true` if the application has an anonymous Client ID.", + "type": "boolean" + }, + "clientId": { + "description": "The Client ID of the application the token is issued to.", + "type": "string" + }, + "displayText": { + "description": "The displayable name of the application the token is issued to.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#token", + "description": "The type of the API resource. This is always `admin#directory#token`.", + "type": "string" + }, + "nativeApp": { + "description": "Whether the token is issued to an installed application. The value is `true` if the application is installed to a desktop or mobile device.", + "type": "boolean" + }, + "scopes": { + "description": "A list of authorization scopes the application is granted.", + "items": { + "type": "string" + }, + "type": "array" + }, + "userKey": { + "description": "The unique ID of the user that issued the token.", + "type": "string" + } + }, + "type": "object" + }, + "Tokens": { + "description": "JSON response template for List tokens operation in Directory API.", + "id": "Tokens", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of Token resources.", + "items": { + "$ref": "Token" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#tokenList", + "description": "The type of the API resource. This is always `admin#directory#tokenList`.", + "type": "string" + } + }, + "type": "object" + }, + "User": { + "description": "The Directory API allows you to create and manage your account's users, user aliases, and user Google profile photos. 
For more information about common tasks, see the [User Accounts Developer's Guide](/admin-sdk/directory/v1/guides/manage-users.html) and the [User Aliases Developer's Guide](/admin-sdk/directory/v1/guides/manage-user-aliases.html).", + "id": "User", + "properties": { + "addresses": { + "description": "The list of the user's addresses. The maximum allowed data size for this field is 10KB.", + "type": "any" + }, + "agreedToTerms": { + "description": "Output only. This property is `true` if the user has completed an initial login and accepted the Terms of Service agreement.", + "readOnly": true, + "type": "boolean" + }, + "aliases": { + "description": "Output only. The list of the user's alias email addresses.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "archived": { + "description": "Indicates if user is archived.", + "type": "boolean" + }, + "changePasswordAtNextLogin": { + "description": "Indicates if the user is forced to change their password at next login. This setting doesn't apply when [the user signs in via a third-party identity provider](https://support.google.com/a/answer/60224).", + "type": "boolean" + }, + "creationTime": { + "description": "User's G Suite account creation time. (Read-only)", + "format": "date-time", + "readOnly": true, + "type": "string" + }, + "customSchemas": { + "additionalProperties": { + "$ref": "UserCustomProperties" + }, + "description": "Custom fields of the user. The key is a `schema_name` and its values are `'field_name': 'field_value'`.", + "type": "object" + }, + "customerId": { + "description": "Output only. The customer ID to [retrieve all account users](/admin-sdk/directory/v1/guides/manage-users.html#get_all_users). You can use the alias `my_customer` to represent your account's `customerId`. As a reseller administrator, you can use the resold customer account's `customerId`. To get a `customerId`, use the account's primary domain in the `domain` parameter of a [users.list](/admin-sdk/directory/v1/reference/users/list) request.", + "readOnly": true, + "type": "string" + }, + "deletionTime": { + "format": "date-time", + "readOnly": true, + "type": "string" + }, + "emails": { + "description": "The list of the user's email addresses. The maximum allowed data size for this field is 10KB.", + "type": "any" + }, + "etag": { + "description": "Output only. ETag of the resource.", + "readOnly": true, + "type": "string" + }, + "externalIds": { + "description": "The list of external IDs for the user, such as an employee or network ID. The maximum allowed data size for this field is 2KB.", + "type": "any" + }, + "gender": { + "description": "The user's gender. The maximum allowed data size for this field is 1KB.", + "type": "any" + }, + "hashFunction": { + "description": "Stores the hash format of the `password` property. The following `hashFunction` values are allowed: * `MD5` - Accepts simple hex-encoded values. * `SHA-1` - Accepts simple hex-encoded values. * `crypt` - Compliant with the [C crypt library](https://en.wikipedia.org/wiki/Crypt_%28C%29). Supports the DES, MD5 (hash prefix `$1$`), SHA-256 (hash prefix `$5$`), and SHA-512 (hash prefix `$6$`) hash algorithms. If rounds are specified as part of the prefix, they must be 10,000 or fewer.", + "type": "string" + }, + "id": { + "description": "The unique ID for the user. A user `id` can be used as a user request URI's `userKey`.", + "type": "string" + }, + "ims": { + "description": "The list of the user's Instant Messenger (IM) accounts. 
A user account can have multiple ims properties, but only one of these ims properties can be the primary IM contact. The maximum allowed data size for this field is 2KB.", + "type": "any" + }, + "includeInGlobalAddressList": { + "description": "Indicates if the user's profile is visible in the Google Workspace global address list when the contact sharing feature is enabled for the domain. For more information about excluding user profiles, see the [administration help center](https://support.google.com/a/answer/1285988).", + "type": "boolean" + }, + "ipWhitelisted": { + "description": "If `true`, the user's IP address is subject to a deprecated IP address [`allowlist`](https://support.google.com/a/answer/60752) configuration.", + "type": "boolean" + }, + "isAdmin": { + "description": "Output only. Indicates a user with super administrator privileges. The `isAdmin` property can only be edited in the [Make a user an administrator](/admin-sdk/directory/v1/guides/manage-users.html#make_admin) operation ( [makeAdmin](/admin-sdk/directory/v1/reference/users/makeAdmin.html) method). If edited in the user [insert](/admin-sdk/directory/v1/reference/users/insert.html) or [update](/admin-sdk/directory/v1/reference/users/update.html) methods, the edit is ignored by the API service.", + "readOnly": true, + "type": "boolean" + }, + "isDelegatedAdmin": { + "description": "Output only. Indicates if the user is a delegated administrator. Delegated administrators are supported by the API but cannot create or undelete users, or make users administrators. These requests are ignored by the API service. Roles and privileges for administrators are assigned using the [Admin console](https://support.google.com/a/answer/33325).", + "readOnly": true, + "type": "boolean" + }, + "isEnforcedIn2Sv": { + "description": "Output only. Is 2-step verification enforced (Read-only)", + "readOnly": true, + "type": "boolean" + }, + "isEnrolledIn2Sv": { + "description": "Output only. Is enrolled in 2-step verification (Read-only)", + "readOnly": true, + "type": "boolean" + }, + "isMailboxSetup": { + "description": "Output only. Indicates if the user's Google mailbox is created. This property is only applicable if the user has been assigned a Gmail license.", + "readOnly": true, + "type": "boolean" + }, + "keywords": { + "description": "The list of the user's keywords. The maximum allowed data size for this field is 1KB.", + "type": "any" + }, + "kind": { + "default": "admin#directory#user", + "description": "Output only. The type of the API resource. For Users resources, the value is `admin#directory#user`.", + "readOnly": true, + "type": "string" + }, + "languages": { + "description": "The user's languages. The maximum allowed data size for this field is 1KB.", + "type": "any" + }, + "lastLoginTime": { + "description": "User's last login time. (Read-only)", + "format": "date-time", + "readOnly": true, + "type": "string" + }, + "locations": { + "description": "The user's locations. The maximum allowed data size for this field is 10KB.", + "type": "any" + }, + "name": { + "$ref": "UserName", + "annotations": { + "required": [ + "directory.users.insert" + ] + }, + "description": "Holds the given and family names of the user, and the read-only `fullName` value. The maximum number of characters in the `givenName` and in the `familyName` values is 60. In addition, name values support unicode/UTF-8 characters, and can contain spaces, letters (a-z), numbers (0-9), dashes (-), forward slashes (/), and periods (.). 
For more information about character usage rules, see the [administration help center](https://support.google.com/a/answer/9193374). Maximum allowed data size for this field is 1KB." + }, + "nonEditableAliases": { + "description": "Output only. The list of the user's non-editable alias email addresses. These are typically outside the account's primary domain or sub-domain.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "notes": { + "description": "Notes for the user.", + "type": "any" + }, + "orgUnitPath": { + "description": "The full path of the parent organization associated with the user. If the parent organization is the top-level, it is represented as a forward slash (`/`).", + "type": "string" + }, + "organizations": { + "description": "The list of organizations the user belongs to. The maximum allowed data size for this field is 10KB.", + "type": "any" + }, + "password": { + "annotations": { + "required": [ + "directory.users.insert" + ] + }, + "description": "User's password", + "type": "string" + }, + "phones": { + "description": "The list of the user's phone numbers. The maximum allowed data size for this field is 1KB.", + "type": "any" + }, + "posixAccounts": { + "description": "The list of [POSIX](https://www.opengroup.org/austin/papers/posix_faq.html) account information for the user.", + "type": "any" + }, + "primaryEmail": { + "annotations": { + "required": [ + "directory.users.insert" + ] + }, + "description": "The user's primary email address. This property is required in a request to create a user account. The `primaryEmail` must be unique and cannot be an alias of another user.", + "type": "string" + }, + "recoveryEmail": { + "description": "Recovery email of the user.", + "type": "string" + }, + "recoveryPhone": { + "description": "Recovery phone of the user. The phone number must be in the E.164 format, starting with the plus sign (+). Example: *+16506661212*.", + "type": "string" + }, + "relations": { + "description": "The list of the user's relationships to other users. The maximum allowed data size for this field is 2KB.", + "type": "any" + }, + "sshPublicKeys": { + "description": "A list of SSH public keys.", + "type": "any" + }, + "suspended": { + "description": "Indicates if user is suspended.", + "type": "boolean" + }, + "suspensionReason": { + "description": "Output only. Has the reason a user account is suspended either by the administrator or by Google at the time of suspension. The property is returned only if the `suspended` property is `true`.", + "readOnly": true, + "type": "string" + }, + "thumbnailPhotoEtag": { + "description": "Output only. ETag of the user's photo (Read-only)", + "readOnly": true, + "type": "string" + }, + "thumbnailPhotoUrl": { + "description": "Output only. The URL of the user's profile photo. The URL might be temporary or private.", + "readOnly": true, + "type": "string" + }, + "websites": { + "description": "The user's websites. The maximum allowed data size for this field is 2KB.", + "type": "any" + } + }, + "type": "object" + }, + "UserAbout": { + "description": "JSON template for About (notes) of a user in Directory API.", + "id": "UserAbout", + "properties": { + "contentType": { + "description": "About entry can have a type which indicates the content type. It can either be plain or html. 
By default, notes contents are assumed to contain plain text.", + "type": "string" + }, + "value": { + "description": "Actual value of notes.", + "type": "string" + } + }, + "type": "object" + }, + "UserAddress": { + "description": "JSON template for address.", + "id": "UserAddress", + "properties": { + "country": { + "description": "Country.", + "type": "string" + }, + "countryCode": { + "description": "Country code.", + "type": "string" + }, + "customType": { + "description": "Custom type.", + "type": "string" + }, + "extendedAddress": { + "description": "Extended Address.", + "type": "string" + }, + "formatted": { + "description": "Formatted address.", + "type": "string" + }, + "locality": { + "description": "Locality.", + "type": "string" + }, + "poBox": { + "description": "Other parts of address.", + "type": "string" + }, + "postalCode": { + "description": "Postal code.", + "type": "string" + }, + "primary": { + "description": "If this is user's primary address. Only one entry could be marked as primary.", + "type": "boolean" + }, + "region": { + "description": "Region.", + "type": "string" + }, + "sourceIsStructured": { + "description": "User supplied address was structured. Structured addresses are NOT supported at this time. You might be able to write structured addresses but any values will eventually be clobbered.", + "type": "boolean" + }, + "streetAddress": { + "description": "Street.", + "type": "string" + }, + "type": { + "description": "Each entry can have a type which indicates standard values of that entry. For example address could be of home work etc. In addition to the standard type an entry can have a custom type and can take any value. Such type should have the CUSTOM value as type and also have a customType value.", + "type": "string" + } + }, + "type": "object" + }, + "UserAlias": { + "description": "The Directory API manages aliases, which are alternative email addresses.", + "id": "UserAlias", + "properties": { + "alias": { + "description": "The alias email address.", + "type": "string" + }, + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "id": { + "description": "The unique ID for the user.", + "type": "string" + }, + "kind": { + "description": "The type of the API resource. For Alias resources, the value is `admin#directory#alias`.", + "type": "string" + }, + "primaryEmail": { + "description": "The user's primary email address.", + "type": "string" + } + }, + "type": "object" + }, + "UserCustomProperties": { + "additionalProperties": { + "type": "any" + }, + "description": "JSON template for a set of custom properties (i.e. all fields in a particular schema)", + "id": "UserCustomProperties", + "type": "object" + }, + "UserEmail": { + "description": "JSON template for an email.", + "id": "UserEmail", + "properties": { + "address": { + "description": "Email id of the user.", + "type": "string" + }, + "customType": { + "description": "Custom Type.", + "type": "string" + }, + "primary": { + "description": "If this is user's primary email. Only one entry could be marked as primary.", + "type": "boolean" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example email could be of home, work etc. 
In addition to the standard type, an entry can have a custom type and can take any value. Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + } + }, + "type": "object" + }, + "UserExternalId": { + "description": "JSON template for an externalId entry.", + "id": "UserExternalId", + "properties": { + "customType": { + "description": "Custom type.", + "type": "string" + }, + "type": { + "description": "The type of the ID.", + "type": "string" + }, + "value": { + "description": "The value of the ID.", + "type": "string" + } + }, + "type": "object" + }, + "UserGender": { + "id": "UserGender", + "properties": { + "addressMeAs": { + "description": "AddressMeAs. A human-readable string containing the proper way to refer to the profile owner, for example he/him/his or they/them/their.", + "type": "string" + }, + "customGender": { + "description": "Custom gender.", + "type": "string" + }, + "type": { + "description": "Gender.", + "type": "string" + } + }, + "type": "object" + }, + "UserIm": { + "description": "JSON template for an instant messenger entry of a user.", + "id": "UserIm", + "properties": { + "customProtocol": { + "description": "Custom protocol.", + "type": "string" + }, + "customType": { + "description": "Custom type.", + "type": "string" + }, + "im": { + "description": "Instant messenger ID.", + "type": "string" + }, + "primary": { + "description": "Whether this is the user's primary IM. Only one entry can be marked as primary.", + "type": "boolean" + }, + "protocol": { + "description": "Protocol used in the instant messenger. It should be one of the values from the ImProtocolTypes map. Similar to type, it can take a CUSTOM value and specify the custom name in the customProtocol field.", + "type": "string" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example, an instant messenger could be of type home or work. In addition to the standard type, an entry can have a custom type and can take any value. Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + } + }, + "type": "object" + }, + "UserKeyword": { + "description": "JSON template for a keyword entry.", + "id": "UserKeyword", + "properties": { + "customType": { + "description": "Custom type.", + "type": "string" + }, + "type": { + "description": "Each entry can have a type which indicates the standard type of that entry. For example, a keyword could be of type occupation or outlook. In addition to the standard type, an entry can have a custom type and can give it any name. Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + }, + "value": { + "description": "Keyword.", + "type": "string" + } + }, + "type": "object" + }, + "UserLanguage": { + "description": "JSON template for a language entry.", + "id": "UserLanguage", + "properties": { + "customLanguage": { + "description": "Other language. A user can provide their own language name if there is no corresponding ISO 639 language code. If this is set, `languageCode` can't be set.", + "type": "string" + }, + "languageCode": { + "description": "ISO 639 string representation of a language. See [Language Codes](/admin-sdk/directory/v1/languages) for the list of supported codes. Valid language codes outside the supported set will be accepted by the API but may lead to unexpected behavior. Illegal values cause `SchemaException`. 
If this is set, `customLanguage` can't be set.", + "type": "string" + }, + "preference": { + "description": "Optional. If present, controls whether the specified `languageCode` is the user's preferred language. If `customLanguage` is set, this can't be set. Allowed values are `preferred` and `not_preferred`.", + "type": "string" + } + }, + "type": "object" + }, + "UserLocation": { + "description": "JSON template for a location entry.", + "id": "UserLocation", + "properties": { + "area": { + "description": "Textual location. This is most useful for display purposes to concisely describe the location. For example, 'Mountain View, CA', 'Near Seattle', or 'US-NYC-9TH 9A209A'.", + "type": "string" + }, + "buildingId": { + "description": "Building identifier.", + "type": "string" + }, + "customType": { + "description": "Custom type.", + "type": "string" + }, + "deskCode": { + "description": "Most specific textual code of an individual desk location.", + "type": "string" + }, + "floorName": { + "description": "Floor name/number.", + "type": "string" + }, + "floorSection": { + "description": "Floor section. More specific location within the floor. For example, if a floor is divided into sections 'A', 'B', and 'C', this field would identify one of those values.", + "type": "string" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example, a location could be of type default or desk. In addition to the standard type, an entry can have a custom type and can give it any name. Such types should have 'custom' as type and also have a customType value.", + "type": "string" + } + }, + "type": "object" + }, + "UserMakeAdmin": { + "id": "UserMakeAdmin", + "properties": { + "status": { + "annotations": { + "required": [ + "directory.users.makeAdmin" + ] + }, + "description": "Indicates the administrator status of the user.", + "type": "boolean" + } + }, + "type": "object" + }, + "UserName": { + "id": "UserName", + "properties": { + "displayName": { + "description": "The user's display name. Limit: 256 characters.", + "type": "string" + }, + "familyName": { + "annotations": { + "required": [ + "directory.users.insert" + ] + }, + "description": "The user's last name. Required when creating a user account.", + "type": "string" + }, + "fullName": { + "description": "The user's full name formed by concatenating the first and last name values.", + "type": "string" + }, + "givenName": { + "annotations": { + "required": [ + "directory.users.insert" + ] + }, + "description": "The user's first name. Required when creating a user account.", + "type": "string" + } + }, + "type": "object" + }, + "UserOrganization": { + "description": "JSON template for an organization entry.", + "id": "UserOrganization", + "properties": { + "costCenter": { + "description": "The cost center of the user's department.", + "type": "string" + }, + "customType": { + "description": "Custom type.", + "type": "string" + }, + "department": { + "description": "Department within the organization.", + "type": "string" + }, + "description": { + "description": "Description of the organization.", + "type": "string" + }, + "domain": { + "description": "The domain to which the organization belongs.", + "type": "string" + }, + "fullTimeEquivalent": { + "description": "The full-time equivalent millipercent within the organization (100000 = 100%).", + "format": "int32", + "type": "integer" + }, + "location": { + "description": "Location of the organization. 
This need not be a fully qualified address.", + "type": "string" + }, + "name": { + "description": "Name of the organization.", + "type": "string" + }, + "primary": { + "description": "Whether this is the user's primary organization.", + "type": "boolean" + }, + "symbol": { + "description": "Symbol of the organization.", + "type": "string" + }, + "title": { + "description": "Title (designation) of the user in the organization.", + "type": "string" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example, an organization could be of type school or work. In addition to the standard type, an entry can have a custom type and can give it any name. Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + } + }, + "type": "object" + }, + "UserPhone": { + "description": "JSON template for a phone entry.", + "id": "UserPhone", + "properties": { + "customType": { + "description": "Custom type.", + "type": "string" + }, + "primary": { + "description": "Whether this is the user's primary phone.", + "type": "boolean" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example, a phone could be of type home_fax, work, or mobile. In addition to the standard type, an entry can have a custom type and can give it any name. Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + }, + "value": { + "description": "Phone number.", + "type": "string" + } + }, + "type": "object" + }, + "UserPhoto": { + "id": "UserPhoto", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "height": { + "description": "Height of the photo in pixels.", + "format": "int32", + "type": "integer" + }, + "id": { + "description": "The ID the API uses to uniquely identify the user.", + "type": "string" + }, + "kind": { + "default": "admin#directory#user#photo", + "description": "The type of the API resource. For Photo resources, this is `admin#directory#user#photo`.", + "type": "string" + }, + "mimeType": { + "description": "The MIME type of the photo. Allowed values are `JPEG`, `PNG`, `GIF`, `BMP`, `TIFF`, and web-safe base64 encoding.", + "type": "string" + }, + "photoData": { + "annotations": { + "required": [ + "directory.users.photos.update" + ] + }, + "description": "The user photo's upload data in [web-safe Base64](https://en.wikipedia.org/wiki/Base64#URL_applications) format in bytes. This means: * The slash (/) character is replaced with the underscore (_) character. * The plus sign (+) character is replaced with the hyphen (-) character. * The equals sign (=) character is replaced with the asterisk (*). * For padding, the period (.) character is used instead of the RFC 4648 base64url definition, which uses the equals sign (=) for padding. This is done to simplify URL-parsing. 
* Whatever the size of the photo being uploaded, the API downsizes it to 96x96 pixels.", + "format": "byte", + "type": "string" + }, + "primaryEmail": { + "description": "The user's primary email address.", + "type": "string" + }, + "width": { + "description": "Width of the photo in pixels.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "UserPosixAccount": { + "description": "JSON template for a POSIX account entry.", + "id": "UserPosixAccount", + "properties": { + "accountId": { + "description": "A POSIX account field identifier.", + "type": "string" + }, + "gecos": { + "description": "The GECOS (user information) for this account.", + "type": "string" + }, + "gid": { + "description": "The default group ID.", + "format": "uint64", + "type": "string" + }, + "homeDirectory": { + "description": "The path to the home directory for this account.", + "type": "string" + }, + "operatingSystemType": { + "description": "The operating system type for this account.", + "type": "string" + }, + "primary": { + "description": "Whether this is the user's primary account within the SystemId.", + "type": "boolean" + }, + "shell": { + "description": "The path to the login shell for this account.", + "type": "string" + }, + "systemId": { + "description": "System identifier that the account's username or UID applies to.", + "type": "string" + }, + "uid": { + "description": "The POSIX-compliant user ID.", + "format": "uint64", + "type": "string" + }, + "username": { + "description": "The username of the account.", + "type": "string" + } + }, + "type": "object" + }, + "UserRelation": { + "description": "JSON template for a relation entry.", + "id": "UserRelation", + "properties": { + "customType": { + "description": "Custom type.", + "type": "string" + }, + "type": { + "description": "The relation of the user. Some of the possible values are mother, father, sister, brother, manager, assistant, and partner.", + "type": "string" + }, + "value": { + "description": "The name of the relation.", + "type": "string" + } + }, + "type": "object" + }, + "UserSshPublicKey": { + "description": "JSON template for an SSH public key entry.", + "id": "UserSshPublicKey", + "properties": { + "expirationTimeUsec": { + "description": "An expiration time in microseconds since epoch.", + "format": "int64", + "type": "string" + }, + "fingerprint": { + "description": "A SHA-256 fingerprint of the SSH public key. (Read-only)", + "readOnly": true, + "type": "string" + }, + "key": { + "description": "An SSH public key.", + "type": "string" + } + }, + "type": "object" + }, + "UserUndelete": { + "id": "UserUndelete", + "properties": { + "orgUnitPath": { + "description": "OrgUnit of the user.", + "type": "string" + } + }, + "type": "object" + }, + "UserWebsite": { + "description": "JSON template for a website entry.", + "id": "UserWebsite", + "properties": { + "customType": { + "description": "Custom type.", + "type": "string" + }, + "primary": { + "description": "Whether this is the user's primary website.", + "type": "boolean" + }, + "type": { + "description": "Each entry can have a type which indicates standard types of that entry. For example, a website could be of type home, work, or blog. In addition to the standard type, an entry can have a custom type and can give it any name. 
Such types should have the CUSTOM value as type and also have a customType value.", + "type": "string" + }, + "value": { + "description": "Website.", + "type": "string" + } + }, + "type": "object" + }, + "Users": { + "id": "Users", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#users", + "description": "Kind of resource this is.", + "type": "string" + }, + "nextPageToken": { + "description": "Token used to access next page of this result.", + "type": "string" + }, + "trigger_event": { + "description": "Event that triggered this response (only used in case of Push Response)", + "type": "string" + }, + "users": { + "description": "A list of user objects.", + "items": { + "$ref": "User" + }, + "type": "array" + } + }, + "type": "object" + }, + "VerificationCode": { + "description": "The Directory API allows you to view, generate, and invalidate backup verification codes for a user.", + "id": "VerificationCode", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "kind": { + "default": "admin#directory#verificationCode", + "description": "The type of the resource. This is always `admin#directory#verificationCode`.", + "type": "string" + }, + "userId": { + "description": "The obfuscated unique ID of the user.", + "type": "string" + }, + "verificationCode": { + "description": "A current verification code for the user. Invalidated or used verification codes are not returned as part of the result.", + "type": "string" + } + }, + "type": "object" + }, + "VerificationCodes": { + "description": "JSON response template for list verification codes operation in Directory API.", + "id": "VerificationCodes", + "properties": { + "etag": { + "description": "ETag of the resource.", + "type": "string" + }, + "items": { + "description": "A list of verification code resources.", + "items": { + "$ref": "VerificationCode" + }, + "type": "array" + }, + "kind": { + "default": "admin#directory#verificationCodesList", + "description": "The type of the resource. This is always `admin#directory#verificationCodesList`.", + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "Admin SDK API", + "version": "directory_v1" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go new file mode 100644 index 00000000..dce6cb1e --- /dev/null +++ b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go @@ -0,0 +1,26141 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package admin provides access to the Admin SDK API. +// +// For product documentation, see: https://developers.google.com/admin-sdk/ +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/admin/directory/v1" +// ... +// ctx := context.Background() +// adminService, err := admin.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. 
To restrict scopes, use option.WithScopes: +// +// adminService, err := admin.NewService(ctx, option.WithScopes(admin.CloudPlatformScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// adminService, err := admin.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// adminService, err := admin.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. +package admin // import "google.golang.org/api/admin/directory/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version + +const apiId = "admin:directory_v1" +const apiName = "admin" +const apiVersion = "directory_v1" +const basePath = "https://admin.googleapis.com/" +const mtlsBasePath = "https://admin.mtls.googleapis.com/" + +// OAuth2 scopes used by this API. 
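+// Request only the narrowest of these scopes that your use case needs. For +// example (an illustrative sketch, not part of the upstream generated docs), +// to inspect users without granting any write access: +// +// srv, err := admin.NewService(ctx, option.WithScopes(admin.AdminDirectoryUserReadonlyScope)) 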
+const ( + // See, add, edit, and permanently delete the printers that your + // organization can use with Chrome + AdminChromePrintersScope = "https://www.googleapis.com/auth/admin.chrome.printers" + + // See the printers that your organization can use with Chrome + AdminChromePrintersReadonlyScope = "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + + // View and manage customer related information + AdminDirectoryCustomerScope = "https://www.googleapis.com/auth/admin.directory.customer" + + // View customer related information + AdminDirectoryCustomerReadonlyScope = "https://www.googleapis.com/auth/admin.directory.customer.readonly" + + // View and manage your Chrome OS devices' metadata + AdminDirectoryDeviceChromeosScope = "https://www.googleapis.com/auth/admin.directory.device.chromeos" + + // View your Chrome OS devices' metadata + AdminDirectoryDeviceChromeosReadonlyScope = "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + + // View and manage your mobile devices' metadata + AdminDirectoryDeviceMobileScope = "https://www.googleapis.com/auth/admin.directory.device.mobile" + + // Manage your mobile devices by performing administrative tasks + AdminDirectoryDeviceMobileActionScope = "https://www.googleapis.com/auth/admin.directory.device.mobile.action" + + // View your mobile devices' metadata + AdminDirectoryDeviceMobileReadonlyScope = "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly" + + // View and manage the provisioning of domains for your customers + AdminDirectoryDomainScope = "https://www.googleapis.com/auth/admin.directory.domain" + + // View domains related to your customers + AdminDirectoryDomainReadonlyScope = "https://www.googleapis.com/auth/admin.directory.domain.readonly" + + // View and manage the provisioning of groups on your domain + AdminDirectoryGroupScope = "https://www.googleapis.com/auth/admin.directory.group" + + // View and manage group subscriptions on your domain + AdminDirectoryGroupMemberScope = "https://www.googleapis.com/auth/admin.directory.group.member" + + // View group subscriptions on your domain + AdminDirectoryGroupMemberReadonlyScope = "https://www.googleapis.com/auth/admin.directory.group.member.readonly" + + // View groups on your domain + AdminDirectoryGroupReadonlyScope = "https://www.googleapis.com/auth/admin.directory.group.readonly" + + // View and manage organization units on your domain + AdminDirectoryOrgunitScope = "https://www.googleapis.com/auth/admin.directory.orgunit" + + // View organization units on your domain + AdminDirectoryOrgunitReadonlyScope = "https://www.googleapis.com/auth/admin.directory.orgunit.readonly" + + // View and manage the provisioning of calendar resources on your domain + AdminDirectoryResourceCalendarScope = "https://www.googleapis.com/auth/admin.directory.resource.calendar" + + // View calendar resources on your domain + AdminDirectoryResourceCalendarReadonlyScope = "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + + // Manage delegated admin roles for your domain + AdminDirectoryRolemanagementScope = "https://www.googleapis.com/auth/admin.directory.rolemanagement" + + // View delegated admin roles for your domain + AdminDirectoryRolemanagementReadonlyScope = "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + + // View and manage the provisioning of users on your domain + AdminDirectoryUserScope = "https://www.googleapis.com/auth/admin.directory.user" + + // View and manage user aliases 
on your domain + AdminDirectoryUserAliasScope = "https://www.googleapis.com/auth/admin.directory.user.alias" + + // View user aliases on your domain + AdminDirectoryUserAliasReadonlyScope = "https://www.googleapis.com/auth/admin.directory.user.alias.readonly" + + // See info about users on your domain + AdminDirectoryUserReadonlyScope = "https://www.googleapis.com/auth/admin.directory.user.readonly" + + // Manage data access permissions for users on your domain + AdminDirectoryUserSecurityScope = "https://www.googleapis.com/auth/admin.directory.user.security" + + // View and manage the provisioning of user schemas on your domain + AdminDirectoryUserschemaScope = "https://www.googleapis.com/auth/admin.directory.userschema" + + // View user schemas on your domain + AdminDirectoryUserschemaReadonlyScope = "https://www.googleapis.com/auth/admin.directory.userschema.readonly" + + // See, edit, configure, and delete your Google Cloud data and see the + // email address for your Google Account. + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/admin.chrome.printers", + "https://www.googleapis.com/auth/admin.chrome.printers.readonly", + "https://www.googleapis.com/auth/admin.directory.customer", + "https://www.googleapis.com/auth/admin.directory.customer.readonly", + "https://www.googleapis.com/auth/admin.directory.device.chromeos", + "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly", + "https://www.googleapis.com/auth/admin.directory.device.mobile", + "https://www.googleapis.com/auth/admin.directory.device.mobile.action", + "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly", + "https://www.googleapis.com/auth/admin.directory.domain", + "https://www.googleapis.com/auth/admin.directory.domain.readonly", + "https://www.googleapis.com/auth/admin.directory.group", + "https://www.googleapis.com/auth/admin.directory.group.member", + "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly", + "https://www.googleapis.com/auth/admin.directory.orgunit", + "https://www.googleapis.com/auth/admin.directory.orgunit.readonly", + "https://www.googleapis.com/auth/admin.directory.resource.calendar", + "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly", + "https://www.googleapis.com/auth/admin.directory.rolemanagement", + "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly", + "https://www.googleapis.com/auth/admin.directory.user", + "https://www.googleapis.com/auth/admin.directory.user.alias", + "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/admin.directory.user.security", + "https://www.googleapis.com/auth/admin.directory.userschema", + "https://www.googleapis.com/auth/admin.directory.userschema.readonly", + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) 
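+ // The default and mTLS endpoints are registered via internaloption, so an + // explicit option.WithEndpoint supplied by the caller still takes precedence. 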
+ opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Asps = NewAspsService(s) + s.Channels = NewChannelsService(s) + s.Chromeosdevices = NewChromeosdevicesService(s) + s.Customer = NewCustomerService(s) + s.Customers = NewCustomersService(s) + s.DomainAliases = NewDomainAliasesService(s) + s.Domains = NewDomainsService(s) + s.Groups = NewGroupsService(s) + s.Members = NewMembersService(s) + s.Mobiledevices = NewMobiledevicesService(s) + s.Orgunits = NewOrgunitsService(s) + s.Privileges = NewPrivilegesService(s) + s.Resources = NewResourcesService(s) + s.RoleAssignments = NewRoleAssignmentsService(s) + s.Roles = NewRolesService(s) + s.Schemas = NewSchemasService(s) + s.Tokens = NewTokensService(s) + s.TwoStepVerification = NewTwoStepVerificationService(s) + s.Users = NewUsersService(s) + s.VerificationCodes = NewVerificationCodesService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Asps *AspsService + + Channels *ChannelsService + + Chromeosdevices *ChromeosdevicesService + + Customer *CustomerService + + Customers *CustomersService + + DomainAliases *DomainAliasesService + + Domains *DomainsService + + Groups *GroupsService + + Members *MembersService + + Mobiledevices *MobiledevicesService + + Orgunits *OrgunitsService + + Privileges *PrivilegesService + + Resources *ResourcesService + + RoleAssignments *RoleAssignmentsService + + Roles *RolesService + + Schemas *SchemasService + + Tokens *TokensService + + TwoStepVerification *TwoStepVerificationService + + Users *UsersService + + VerificationCodes *VerificationCodesService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAspsService(s *Service) *AspsService { + rs := &AspsService{s: s} + return rs +} + +type AspsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewChromeosdevicesService(s *Service) *ChromeosdevicesService { + rs := &ChromeosdevicesService{s: s} + return rs +} + +type ChromeosdevicesService struct { + s *Service +} + +func NewCustomerService(s *Service) *CustomerService { + rs := &CustomerService{s: s} + rs.Devices = NewCustomerDevicesService(s) + return rs +} + +type CustomerService struct { + s *Service + + Devices *CustomerDevicesService +} + +func NewCustomerDevicesService(s *Service) *CustomerDevicesService { + rs := &CustomerDevicesService{s: s} + rs.Chromeos = NewCustomerDevicesChromeosService(s) + return rs +} + +type 
CustomerDevicesService struct { + s *Service + + Chromeos *CustomerDevicesChromeosService +} + +func NewCustomerDevicesChromeosService(s *Service) *CustomerDevicesChromeosService { + rs := &CustomerDevicesChromeosService{s: s} + rs.Commands = NewCustomerDevicesChromeosCommandsService(s) + return rs +} + +type CustomerDevicesChromeosService struct { + s *Service + + Commands *CustomerDevicesChromeosCommandsService +} + +func NewCustomerDevicesChromeosCommandsService(s *Service) *CustomerDevicesChromeosCommandsService { + rs := &CustomerDevicesChromeosCommandsService{s: s} + return rs +} + +type CustomerDevicesChromeosCommandsService struct { + s *Service +} + +func NewCustomersService(s *Service) *CustomersService { + rs := &CustomersService{s: s} + rs.Chrome = NewCustomersChromeService(s) + return rs +} + +type CustomersService struct { + s *Service + + Chrome *CustomersChromeService +} + +func NewCustomersChromeService(s *Service) *CustomersChromeService { + rs := &CustomersChromeService{s: s} + rs.PrintServers = NewCustomersChromePrintServersService(s) + rs.Printers = NewCustomersChromePrintersService(s) + return rs +} + +type CustomersChromeService struct { + s *Service + + PrintServers *CustomersChromePrintServersService + + Printers *CustomersChromePrintersService +} + +func NewCustomersChromePrintServersService(s *Service) *CustomersChromePrintServersService { + rs := &CustomersChromePrintServersService{s: s} + return rs +} + +type CustomersChromePrintServersService struct { + s *Service +} + +func NewCustomersChromePrintersService(s *Service) *CustomersChromePrintersService { + rs := &CustomersChromePrintersService{s: s} + return rs +} + +type CustomersChromePrintersService struct { + s *Service +} + +func NewDomainAliasesService(s *Service) *DomainAliasesService { + rs := &DomainAliasesService{s: s} + return rs +} + +type DomainAliasesService struct { + s *Service +} + +func NewDomainsService(s *Service) *DomainsService { + rs := &DomainsService{s: s} + return rs +} + +type DomainsService struct { + s *Service +} + +func NewGroupsService(s *Service) *GroupsService { + rs := &GroupsService{s: s} + rs.Aliases = NewGroupsAliasesService(s) + return rs +} + +type GroupsService struct { + s *Service + + Aliases *GroupsAliasesService +} + +func NewGroupsAliasesService(s *Service) *GroupsAliasesService { + rs := &GroupsAliasesService{s: s} + return rs +} + +type GroupsAliasesService struct { + s *Service +} + +func NewMembersService(s *Service) *MembersService { + rs := &MembersService{s: s} + return rs +} + +type MembersService struct { + s *Service +} + +func NewMobiledevicesService(s *Service) *MobiledevicesService { + rs := &MobiledevicesService{s: s} + return rs +} + +type MobiledevicesService struct { + s *Service +} + +func NewOrgunitsService(s *Service) *OrgunitsService { + rs := &OrgunitsService{s: s} + return rs +} + +type OrgunitsService struct { + s *Service +} + +func NewPrivilegesService(s *Service) *PrivilegesService { + rs := &PrivilegesService{s: s} + return rs +} + +type PrivilegesService struct { + s *Service +} + +func NewResourcesService(s *Service) *ResourcesService { + rs := &ResourcesService{s: s} + rs.Buildings = NewResourcesBuildingsService(s) + rs.Calendars = NewResourcesCalendarsService(s) + rs.Features = NewResourcesFeaturesService(s) + return rs +} + +type ResourcesService struct { + s *Service + + Buildings *ResourcesBuildingsService + + Calendars *ResourcesCalendarsService + + Features *ResourcesFeaturesService +} + +func NewResourcesBuildingsService(s 
*Service) *ResourcesBuildingsService { + rs := &ResourcesBuildingsService{s: s} + return rs +} + +type ResourcesBuildingsService struct { + s *Service +} + +func NewResourcesCalendarsService(s *Service) *ResourcesCalendarsService { + rs := &ResourcesCalendarsService{s: s} + return rs +} + +type ResourcesCalendarsService struct { + s *Service +} + +func NewResourcesFeaturesService(s *Service) *ResourcesFeaturesService { + rs := &ResourcesFeaturesService{s: s} + return rs +} + +type ResourcesFeaturesService struct { + s *Service +} + +func NewRoleAssignmentsService(s *Service) *RoleAssignmentsService { + rs := &RoleAssignmentsService{s: s} + return rs +} + +type RoleAssignmentsService struct { + s *Service +} + +func NewRolesService(s *Service) *RolesService { + rs := &RolesService{s: s} + return rs +} + +type RolesService struct { + s *Service +} + +func NewSchemasService(s *Service) *SchemasService { + rs := &SchemasService{s: s} + return rs +} + +type SchemasService struct { + s *Service +} + +func NewTokensService(s *Service) *TokensService { + rs := &TokensService{s: s} + return rs +} + +type TokensService struct { + s *Service +} + +func NewTwoStepVerificationService(s *Service) *TwoStepVerificationService { + rs := &TwoStepVerificationService{s: s} + return rs +} + +type TwoStepVerificationService struct { + s *Service +} + +func NewUsersService(s *Service) *UsersService { + rs := &UsersService{s: s} + rs.Aliases = NewUsersAliasesService(s) + rs.Photos = NewUsersPhotosService(s) + return rs +} + +type UsersService struct { + s *Service + + Aliases *UsersAliasesService + + Photos *UsersPhotosService +} + +func NewUsersAliasesService(s *Service) *UsersAliasesService { + rs := &UsersAliasesService{s: s} + return rs +} + +type UsersAliasesService struct { + s *Service +} + +func NewUsersPhotosService(s *Service) *UsersPhotosService { + rs := &UsersPhotosService{s: s} + return rs +} + +type UsersPhotosService struct { + s *Service +} + +func NewVerificationCodesService(s *Service) *VerificationCodesService { + rs := &VerificationCodesService{s: s} + return rs +} + +type VerificationCodesService struct { + s *Service +} + +// Alias: JSON template for Alias object in Directory API. +type Alias struct { + Alias string `json:"alias,omitempty"` + + Etag string `json:"etag,omitempty"` + + Id string `json:"id,omitempty"` + + Kind string `json:"kind,omitempty"` + + PrimaryEmail string `json:"primaryEmail,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Alias") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alias") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
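+ // + // As an illustrative sketch (not part of the upstream generated comment): + // Alias{NullFields: []string{"Kind"}} marshals "kind" as JSON null, while + // Alias{ForceSendFields: []string{"Kind"}} marshals it as an empty string. 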
+ NullFields []string `json:"-"` +} + +func (s *Alias) MarshalJSON() ([]byte, error) { + type NoMethod Alias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Aliases: JSON response template to list aliases in Directory API. +type Aliases struct { + Aliases []interface{} `json:"aliases,omitempty"` + + Etag string `json:"etag,omitempty"` + + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Aliases") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Aliases") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Aliases) MarshalJSON() ([]byte, error) { + type NoMethod Aliases + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Asp: An application-specific password (ASP) is used with applications +// that do not accept a verification code when logging into the +// application on certain devices. The ASP access code is used instead +// of the login and password you commonly use when accessing an +// application through a browser. For more information about ASPs and +// how to create one, see the help center +// (https://support.google.com/a/answer/2537800#asp). +type Asp struct { + // CodeId: The unique ID of the ASP. + CodeId int64 `json:"codeId,omitempty"` + + // CreationTime: The time when the ASP was created. Expressed in Unix + // time (https://en.wikipedia.org/wiki/Epoch_time) format. + CreationTime int64 `json:"creationTime,omitempty,string"` + + // Etag: ETag of the ASP. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#asp`. + Kind string `json:"kind,omitempty"` + + // LastTimeUsed: The time when the ASP was last used. Expressed in Unix + // time (https://en.wikipedia.org/wiki/Epoch_time) format. + LastTimeUsed int64 `json:"lastTimeUsed,omitempty,string"` + + // Name: The name of the application that the user, represented by their + // `userId`, entered when the ASP was created. + Name string `json:"name,omitempty"` + + // UserKey: The unique ID of the user who issued the ASP. + UserKey string `json:"userKey,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CodeId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CodeId") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Asp) MarshalJSON() ([]byte, error) { + type NoMethod Asp + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Asps struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Items: A list of ASP resources. + Items []*Asp `json:"items,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#aspList`. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Asps) MarshalJSON() ([]byte, error) { + type NoMethod Asps + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuxiliaryMessage: Auxiliary message about issues with printers or +// settings. Example: {message_type:AUXILIARY_MESSAGE_WARNING, +// field_mask:make_and_model, message:"Given printer is invalid or no +// longer supported."} +type AuxiliaryMessage struct { + // AuxiliaryMessage: Human readable message in English. Example: "Given + // printer is invalid or no longer supported." + AuxiliaryMessage string `json:"auxiliaryMessage,omitempty"` + + // FieldMask: Field that this message concerns. + FieldMask string `json:"fieldMask,omitempty"` + + // Severity: Message severity + // + // Possible values: + // "SEVERITY_UNSPECIFIED" - Message type unspecified. + // "SEVERITY_INFO" - Message of severity: info. + // "SEVERITY_WARNING" - Message of severity: warning. + // "SEVERITY_ERROR" - Message of severity: error. + Severity string `json:"severity,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuxiliaryMessage") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuxiliaryMessage") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuxiliaryMessage) MarshalJSON() ([]byte, error) { + type NoMethod AuxiliaryMessage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchCreatePrintServersRequest: Request to add multiple new print +// servers in a batch. +type BatchCreatePrintServersRequest struct { + // Requests: Required. A list of `PrintServer` resources to be created + // (max `50` per batch). + Requests []*CreatePrintServerRequest `json:"requests,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Requests") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Requests") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchCreatePrintServersRequest) MarshalJSON() ([]byte, error) { + type NoMethod BatchCreatePrintServersRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BatchCreatePrintServersResponse struct { + // Failures: A list of create failures. `PrintServer` IDs are not + // populated, as print servers were not created. + Failures []*PrintServerFailureInfo `json:"failures,omitempty"` + + // PrintServers: A list of successfully created print servers with their + // IDs populated. + PrintServers []*PrintServer `json:"printServers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Failures") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Failures") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchCreatePrintServersResponse) MarshalJSON() ([]byte, error) { + type NoMethod BatchCreatePrintServersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchCreatePrintersRequest: Request for adding new printers in batch. +type BatchCreatePrintersRequest struct { + // Requests: A list of Printers to be created. Max 50 at a time. + Requests []*CreatePrinterRequest `json:"requests,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Requests") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Requests") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchCreatePrintersRequest) MarshalJSON() ([]byte, error) { + type NoMethod BatchCreatePrintersRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchCreatePrintersResponse: Response for adding new printers in +// batch. +type BatchCreatePrintersResponse struct { + // Failures: A list of create failures. Printer IDs are not populated, + // as printer were not created. + Failures []*FailureInfo `json:"failures,omitempty"` + + // Printers: A list of successfully created printers with their IDs + // populated. + Printers []*Printer `json:"printers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Failures") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Failures") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchCreatePrintersResponse) MarshalJSON() ([]byte, error) { + type NoMethod BatchCreatePrintersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchDeletePrintServersRequest: Request to delete multiple existing +// print servers in a batch. 
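+// +// A minimal construction sketch (the IDs are placeholders): +// +// req := &admin.BatchDeletePrintServersRequest{ +// PrintServerIds: []string{"printServerId1", "printServerId2"}, +// } +// +// The populated request is then passed to the corresponding batch-delete call +// on CustomersChromePrintServersService (defined above). 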
+type BatchDeletePrintServersRequest struct { + // PrintServerIds: A list of print server IDs that should be deleted + // (max `100` per batch). + PrintServerIds []string `json:"printServerIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PrintServerIds") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PrintServerIds") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BatchDeletePrintServersRequest) MarshalJSON() ([]byte, error) { + type NoMethod BatchDeletePrintServersRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BatchDeletePrintServersResponse struct { + // FailedPrintServers: A list of update failures. + FailedPrintServers []*PrintServerFailureInfo `json:"failedPrintServers,omitempty"` + + // PrintServerIds: A list of print server IDs that were successfully + // deleted. + PrintServerIds []string `json:"printServerIds,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FailedPrintServers") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FailedPrintServers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BatchDeletePrintServersResponse) MarshalJSON() ([]byte, error) { + type NoMethod BatchDeletePrintServersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchDeletePrintersRequest: Request for deleting existing printers in +// batch. +type BatchDeletePrintersRequest struct { + // PrinterIds: A list of Printer.id that should be deleted. Max 100 at a + // time. + PrinterIds []string `json:"printerIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PrinterIds") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PrinterIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BatchDeletePrintersRequest) MarshalJSON() ([]byte, error) { + type NoMethod BatchDeletePrintersRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BatchDeletePrintersResponse: Response for deleting existing printers +// in batch. +type BatchDeletePrintersResponse struct { + // FailedPrinters: A list of update failures. + FailedPrinters []*FailureInfo `json:"failedPrinters,omitempty"` + + // PrinterIds: A list of Printer.id that were successfully deleted. + PrinterIds []string `json:"printerIds,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "FailedPrinters") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FailedPrinters") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BatchDeletePrintersResponse) MarshalJSON() ([]byte, error) { + type NoMethod BatchDeletePrintersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Building: Public API: Resources.buildings +type Building struct { + // Address: The postal address of the building. See `PostalAddress` + // (/my-business/reference/rest/v4/PostalAddress) for details. Note that + // only a single address line and region code are required. + Address *BuildingAddress `json:"address,omitempty"` + + // BuildingId: Unique identifier for the building. The maximum length is + // 100 characters. + BuildingId string `json:"buildingId,omitempty"` + + // BuildingName: The building name as seen by users in Calendar. Must be + // unique for the customer. For example, "NYC-CHEL". The maximum length + // is 100 characters. + BuildingName string `json:"buildingName,omitempty"` + + // Coordinates: The geographic coordinates of the center of the + // building, expressed as latitude and longitude in decimal degrees. 
+ Coordinates *BuildingCoordinates `json:"coordinates,omitempty"` + + // Description: A brief description of the building. For example, + // "Chelsea Market". + Description string `json:"description,omitempty"` + + // Etags: ETag of the resource. + Etags string `json:"etags,omitempty"` + + // FloorNames: The display names for all floors in this building. The + // floors are expected to be sorted in ascending order, from lowest + // floor to highest floor. For example, ["B2", "B1", "L", "1", "2", + // "2M", "3", "PH"] Must contain at least one entry. + FloorNames []string `json:"floorNames,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Building) MarshalJSON() ([]byte, error) { + type NoMethod Building + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BuildingAddress: Public API: Resources.buildings +type BuildingAddress struct { + // AddressLines: Unstructured address lines describing the lower levels + // of an address. + AddressLines []string `json:"addressLines,omitempty"` + + // AdministrativeArea: Optional. Highest administrative subdivision + // which is used for postal addresses of a country or region. + AdministrativeArea string `json:"administrativeArea,omitempty"` + + // LanguageCode: Optional. BCP-47 language code of the contents of this + // address (if known). + LanguageCode string `json:"languageCode,omitempty"` + + // Locality: Optional. Generally refers to the city/town portion of the + // address. Examples: US city, IT comune, UK post town. In regions of + // the world where localities are not well defined or do not fit into + // this structure well, leave locality empty and use addressLines. + Locality string `json:"locality,omitempty"` + + // PostalCode: Optional. Postal code of the address. + PostalCode string `json:"postalCode,omitempty"` + + // RegionCode: Required. CLDR region code of the country/region of the + // address. + RegionCode string `json:"regionCode,omitempty"` + + // Sublocality: Optional. Sublocality of the address. + Sublocality string `json:"sublocality,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AddressLines") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "AddressLines") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *BuildingAddress) MarshalJSON() ([]byte, error) {
+ type NoMethod BuildingAddress
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// BuildingCoordinates: Public API: Resources.buildings
+type BuildingCoordinates struct {
+ // Latitude: Latitude in decimal degrees.
+ Latitude float64 `json:"latitude,omitempty"`
+
+ // Longitude: Longitude in decimal degrees.
+ Longitude float64 `json:"longitude,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Latitude") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Latitude") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *BuildingCoordinates) MarshalJSON() ([]byte, error) {
+ type NoMethod BuildingCoordinates
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// UnmarshalJSON implements json.Unmarshaler. gensupport.JSONFloat64
+// accepts latitude and longitude values encoded either as JSON numbers
+// or as strings (including "NaN", "Infinity" and "-Infinity") and
+// converts them to float64.
+func (s *BuildingCoordinates) UnmarshalJSON(data []byte) error {
+ type NoMethod BuildingCoordinates
+ var s1 struct {
+  Latitude gensupport.JSONFloat64 `json:"latitude"`
+  Longitude gensupport.JSONFloat64 `json:"longitude"`
+  *NoMethod
+ }
+ s1.NoMethod = (*NoMethod)(s)
+ if err := json.Unmarshal(data, &s1); err != nil {
+  return err
+ }
+ s.Latitude = float64(s1.Latitude)
+ s.Longitude = float64(s1.Longitude)
+ return nil
+}
+
+// Buildings: Public API: Resources.buildings
+type Buildings struct {
+ // Buildings: The Buildings in this page of results.
+ Buildings []*Building `json:"buildings,omitempty"`
+
+ // Etag: ETag of the resource.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: Kind of resource this is.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g.
"Buildings") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buildings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Buildings) MarshalJSON() ([]byte, error) { + type NoMethod Buildings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CalendarResource: Public API: Resources.calendars +type CalendarResource struct { + // BuildingId: Unique ID for the building a resource is located in. + BuildingId string `json:"buildingId,omitempty"` + + // Capacity: Capacity of a resource, number of seats in a room. + Capacity int64 `json:"capacity,omitempty"` + + // Etags: ETag of the resource. + Etags string `json:"etags,omitempty"` + + // FeatureInstances: Instances of features for the calendar resource. + FeatureInstances interface{} `json:"featureInstances,omitempty"` + + // FloorName: Name of the floor a resource is located on. + FloorName string `json:"floorName,omitempty"` + + // FloorSection: Name of the section within a floor a resource is + // located in. + FloorSection string `json:"floorSection,omitempty"` + + // GeneratedResourceName: The read-only auto-generated name of the + // calendar resource which includes metadata about the resource such as + // building name, floor, capacity, etc. For example, "NYC-2-Training + // Room 1A (16)". + GeneratedResourceName string `json:"generatedResourceName,omitempty"` + + // Kind: The type of the resource. For calendar resources, the value is + // `admin#directory#resources#calendars#CalendarResource`. + Kind string `json:"kind,omitempty"` + + // ResourceCategory: The category of the calendar resource. Either + // CONFERENCE_ROOM or OTHER. Legacy data is set to CATEGORY_UNKNOWN. + ResourceCategory string `json:"resourceCategory,omitempty"` + + // ResourceDescription: Description of the resource, visible only to + // admins. + ResourceDescription string `json:"resourceDescription,omitempty"` + + // ResourceEmail: The read-only email for the calendar resource. + // Generated as part of creating a new calendar resource. + ResourceEmail string `json:"resourceEmail,omitempty"` + + // ResourceId: The unique ID for the calendar resource. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceName: The name of the calendar resource. For example, + // "Training Room 1A". + ResourceName string `json:"resourceName,omitempty"` + + // ResourceType: The type of the calendar resource, intended for + // non-room resources. + ResourceType string `json:"resourceType,omitempty"` + + // UserVisibleDescription: Description of the resource, visible to users + // and admins. + UserVisibleDescription string `json:"userVisibleDescription,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "BuildingId") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "BuildingId") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *CalendarResource) MarshalJSON() ([]byte, error) {
+ type NoMethod CalendarResource
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// CalendarResources: Public API: Resources.calendars
+type CalendarResources struct {
+ // Etag: ETag of the resource.
+ Etag string `json:"etag,omitempty"`
+
+ // Items: The CalendarResources in this page of results.
+ Items []*CalendarResource `json:"items,omitempty"`
+
+ // Kind: Identifies this as a collection of CalendarResources. This is
+ // always `admin#directory#resources#calendars#calendarResourcesList`.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Etag") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Etag") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *CalendarResources) MarshalJSON() ([]byte, error) {
+ type NoMethod CalendarResources
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// Channel: A notification channel used to watch for resource changes.
+type Channel struct {
+ // Address: The address where notifications are delivered for this
+ // channel.
+ Address string `json:"address,omitempty"`
+
+ // Expiration: Date and time of notification channel expiration,
+ // expressed as a Unix timestamp, in milliseconds. Optional.
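+ // For example, an expiration one hour from now could be computed with
+ // the standard library (time.Time.UnixMilli requires Go 1.17 or
+ // later); `ch` is an assumed *Channel:
+ //
+ //  ch.Expiration = time.Now().Add(time.Hour).UnixMilli()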
+ Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource, which is `api#channel`. + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. For example, `params.ttl` specifies the time-to-live in + // seconds for the notification channel, where the default is 2 hours + // and the maximum TTL is 2 days. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type NoMethod Channel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDevice: Google Chrome devices run on the Chrome OS +// (https://support.google.com/chromeos). For more information about +// common API tasks, see the Developer's Guide +// (/admin-sdk/directory/v1/guides/manage-chrome-devices). +type ChromeOsDevice struct { + // ActiveTimeRanges: A list of active time ranges (Read-only). + ActiveTimeRanges []*ChromeOsDeviceActiveTimeRanges `json:"activeTimeRanges,omitempty"` + + // AnnotatedAssetId: The asset identifier as noted by an administrator + // or specified during enrollment. + AnnotatedAssetId string `json:"annotatedAssetId,omitempty"` + + // AnnotatedLocation: The address or location of the device as noted by + // the administrator. Maximum length is `200` characters. Empty values + // are allowed. + AnnotatedLocation string `json:"annotatedLocation,omitempty"` + + // AnnotatedUser: The user of the device as noted by the administrator. + // Maximum length is 100 characters. Empty values are allowed. 
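+ // Because empty values are allowed here but are omitted from request
+ // bodies by default, clearing this field requires ForceSendFields. A
+ // minimal sketch:
+ //
+ //  d := &ChromeOsDevice{AnnotatedUser: ""}
+ //  d.ForceSendFields = []string{"AnnotatedUser"}
+ //  // d now marshals as {"annotatedUser":""} instead of {}.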
+ AnnotatedUser string `json:"annotatedUser,omitempty"`
+
+ // AutoUpdateExpiration: (Read-only) The timestamp after which the
+ // device will stop receiving Chrome updates or support.
+ AutoUpdateExpiration int64 `json:"autoUpdateExpiration,omitempty,string"`
+
+ // BootMode: The boot mode for the device. The possible values are: *
+ // `Verified`: The device is running a valid version of the Chrome OS. *
+ // `Dev`: The device's developer hardware switch is enabled. When
+ // booted, the device has a command line shell. For an example of a
+ // developer switch, see the Chromebook developer information
+ // (https://www.chromium.org/chromium-os/developer-information-for-chrome-os-devices/samsung-series-5-chromebook#TOC-Developer-switch).
+ BootMode string `json:"bootMode,omitempty"`
+
+ // CpuInfo: Information regarding CPU specs in the device.
+ CpuInfo []*ChromeOsDeviceCpuInfo `json:"cpuInfo,omitempty"`
+
+ // CpuStatusReports: Reports of CPU utilization and temperature
+ // (Read-only)
+ CpuStatusReports []*ChromeOsDeviceCpuStatusReports `json:"cpuStatusReports,omitempty"`
+
+ // DeprovisionReason: (Read-only) Deprovision reason.
+ //
+ // Possible values:
+ // "deprovisionReasonUnspecified" - The deprovision reason is unknown.
+ // "deprovisionReasonSameModelReplacement" - The device was replaced
+ // by a device with the same model.
+ // "deprovisionReasonUpgrade" - The device was upgraded.
+ // "deprovisionReasonDomainMove" - The device's domain was changed.
+ // "deprovisionReasonServiceExpiration" - Service expired for the
+ // device.
+ // "deprovisionReasonOther" - The device was deprovisioned for a
+ // legacy reason that is no longer supported.
+ // "deprovisionReasonDifferentModelReplacement" - The device was
+ // replaced by a device with a different model.
+ // "deprovisionReasonRetiringDevice" - The device was retired.
+ // "deprovisionReasonUpgradeTransfer" - The device's perpetual upgrade
+ // was transferred to a new device.
+ // "deprovisionReasonNotRequired" - A reason was not required. For
+ // example, the licenses were returned to the customer's license pool.
+ // "deprovisionReasonRepairCenter" - The device was deprovisioned by a
+ // repair service center.
+ DeprovisionReason string `json:"deprovisionReason,omitempty"`
+
+ // DeviceFiles: A list of device files to download (Read-only)
+ DeviceFiles []*ChromeOsDeviceDeviceFiles `json:"deviceFiles,omitempty"`
+
+ // DeviceId: The unique ID of the Chrome device.
+ DeviceId string `json:"deviceId,omitempty"`
+
+ // DeviceLicenseType: Output only. Device license type.
+ //
+ // Possible values:
+ // "deviceLicenseTypeUnspecified" - UNSPECIFIED type.
+ // "enterprise" - Indicating the device is a
+ // Chromebook/Chromebox/Chromebase enterprise, which is packaged with an
+ // upgrade (license).
+ // "enterpriseUpgrade" - Indicating the device is consuming standalone
+ // Chrome Enterprise Upgrade, a Chrome Enterprise license.
+ // "educationUpgrade" - Indicating the device is consuming Chrome
+ // Education Upgrade (AKA the Chrome EDU perpetual license).
+ // "education" - Packaged with a license as education.
+ // "terminal" - Packaged with a license as terminal.
+ // "kioskUpgrade" - Indicating the device is consuming standalone
+ // Chrome Kiosk Upgrade, a Chrome Kiosk (annual) license.
+ DeviceLicenseType string `json:"deviceLicenseType,omitempty"`
+
+ // DiskVolumeReports: Reports of disk space and other info about
+ // mounted/connected volumes.
+ DiskVolumeReports []*ChromeOsDeviceDiskVolumeReports `json:"diskVolumeReports,omitempty"` + + // DockMacAddress: (Read-only) Built-in MAC address for the docking + // station that the device connected to. Factory sets Media access + // control address (MAC address) assigned for use by a dock. It is + // reserved specifically for MAC pass through device policy. The format + // is twelve (12) hexadecimal digits without any delimiter (uppercase + // letters). This is only relevant for some devices. + DockMacAddress string `json:"dockMacAddress,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // EthernetMacAddress: The device's MAC address on the ethernet network + // interface. + EthernetMacAddress string `json:"ethernetMacAddress,omitempty"` + + // EthernetMacAddress0: (Read-only) MAC address used by the + // Chromebook’s internal ethernet port, and for onboard network + // (ethernet) interface. The format is twelve (12) hexadecimal digits + // without any delimiter (uppercase letters). This is only relevant for + // some devices. + EthernetMacAddress0 string `json:"ethernetMacAddress0,omitempty"` + + // FirmwareVersion: The Chrome device's firmware version. + FirmwareVersion string `json:"firmwareVersion,omitempty"` + + // FirstEnrollmentTime: Date and time for the first time the device was + // enrolled. + FirstEnrollmentTime string `json:"firstEnrollmentTime,omitempty"` + + // Kind: The type of resource. For the Chromeosdevices resource, the + // value is `admin#directory#chromeosdevice`. + Kind string `json:"kind,omitempty"` + + // LastDeprovisionTimestamp: (Read-only) Date and time for the last + // deprovision of the device. + LastDeprovisionTimestamp string `json:"lastDeprovisionTimestamp,omitempty"` + + // LastEnrollmentTime: Date and time the device was last enrolled + // (Read-only) + LastEnrollmentTime string `json:"lastEnrollmentTime,omitempty"` + + // LastKnownNetwork: Contains last known network (Read-only) + LastKnownNetwork []*ChromeOsDeviceLastKnownNetwork `json:"lastKnownNetwork,omitempty"` + + // LastSync: Date and time the device was last synchronized with the + // policy settings in the G Suite administrator control panel + // (Read-only) + LastSync string `json:"lastSync,omitempty"` + + // MacAddress: The device's wireless MAC address. If the device does not + // have this information, it is not included in the response. + MacAddress string `json:"macAddress,omitempty"` + + // ManufactureDate: (Read-only) The date the device was manufactured in + // yyyy-mm-dd format. + ManufactureDate string `json:"manufactureDate,omitempty"` + + // Meid: The Mobile Equipment Identifier (MEID) or the International + // Mobile Equipment Identity (IMEI) for the 3G mobile card in a mobile + // device. A MEID/IMEI is typically used when adding a device to a + // wireless carrier's post-pay service plan. If the device does not have + // this information, this property is not included in the response. For + // more information on how to export a MEID/IMEI list, see the + // Developer's Guide + // (/admin-sdk/directory/v1/guides/manage-chrome-devices.html#export_meid + // ). + Meid string `json:"meid,omitempty"` + + // Model: The device's model information. If the device does not have + // this information, this property is not included in the response. + Model string `json:"model,omitempty"` + + // Notes: Notes about this device added by the administrator. 
This + // property can be searched + // (https://support.google.com/chrome/a/answer/1698333) with the list + // (/admin-sdk/directory/v1/reference/chromeosdevices/list) method's + // `query` parameter. Maximum length is 500 characters. Empty values are + // allowed. + Notes string `json:"notes,omitempty"` + + // OrderNumber: The device's order number. Only devices directly + // purchased from Google have an order number. + OrderNumber string `json:"orderNumber,omitempty"` + + // OrgUnitId: The unique ID of the organizational unit. orgUnitPath is + // the human readable version of orgUnitId. While orgUnitPath may change + // by renaming an organizational unit within the path, orgUnitId is + // unchangeable for one organizational unit. This property can be + // updated + // (/admin-sdk/directory/v1/guides/manage-chrome-devices#move_chrome_devi + // ces_to_ou) using the API. For more information about how to create an + // organizational structure for your device, see the administration help + // center (https://support.google.com/a/answer/182433). + OrgUnitId string `json:"orgUnitId,omitempty"` + + // OrgUnitPath: The full parent path with the organizational unit's name + // associated with the device. Path names are case insensitive. If the + // parent organizational unit is the top-level organization, it is + // represented as a forward slash, `/`. This property can be updated + // (/admin-sdk/directory/v1/guides/manage-chrome-devices#move_chrome_devi + // ces_to_ou) using the API. For more information about how to create an + // organizational structure for your device, see the administration help + // center (https://support.google.com/a/answer/182433). + OrgUnitPath string `json:"orgUnitPath,omitempty"` + + // OsUpdateStatus: The status of the OS updates for the device. + OsUpdateStatus *OsUpdateStatus `json:"osUpdateStatus,omitempty"` + + // OsVersion: The Chrome device's operating system version. + OsVersion string `json:"osVersion,omitempty"` + + // PlatformVersion: The Chrome device's platform version. + PlatformVersion string `json:"platformVersion,omitempty"` + + // RecentUsers: A list of recent device users, in descending order, by + // last login time. + RecentUsers []*ChromeOsDeviceRecentUsers `json:"recentUsers,omitempty"` + + // ScreenshotFiles: A list of screenshot files to download. Type is + // always "SCREENSHOT_FILE". (Read-only) + ScreenshotFiles []*ChromeOsDeviceScreenshotFiles `json:"screenshotFiles,omitempty"` + + // SerialNumber: The Chrome device serial number entered when the device + // was enabled. This value is the same as the Admin console's *Serial + // Number* in the *Chrome OS Devices* tab. + SerialNumber string `json:"serialNumber,omitempty"` + + // Status: The status of the device. + Status string `json:"status,omitempty"` + + // SupportEndDate: Final date the device will be supported (Read-only) + SupportEndDate string `json:"supportEndDate,omitempty"` + + // SystemRamFreeReports: Reports of amounts of available RAM memory + // (Read-only) + SystemRamFreeReports []*ChromeOsDeviceSystemRamFreeReports `json:"systemRamFreeReports,omitempty"` + + // SystemRamTotal: Total RAM on the device [in bytes] (Read-only) + SystemRamTotal int64 `json:"systemRamTotal,omitempty,string"` + + // TpmVersionInfo: Trusted Platform Module (TPM) (Read-only) + TpmVersionInfo *ChromeOsDeviceTpmVersionInfo `json:"tpmVersionInfo,omitempty"` + + // WillAutoRenew: Determines if the device will auto renew its support + // after the support end date. This is a read-only property. 
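+ // Note that the large integer fields in this struct (for example
+ // SystemRamTotal and AutoUpdateExpiration) carry the `,string` JSON
+ // option, so they travel as decimal strings on the wire. A hedged
+ // sketch of what encoding/json produces for such a tag:
+ //
+ //  type t struct {
+ //      N int64 `json:"n,omitempty,string"`
+ //  }
+ //  b, _ := json.Marshal(t{N: 8589934592})
+ //  // b == []byte(`{"n":"8589934592"}`)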
+ WillAutoRenew bool `json:"willAutoRenew,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ActiveTimeRanges") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActiveTimeRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDevice) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDevice + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceActiveTimeRanges struct { + // ActiveTime: Duration of usage in milliseconds. + ActiveTime int64 `json:"activeTime,omitempty"` + + // Date: Date of usage + Date string `json:"date,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActiveTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActiveTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceActiveTimeRanges) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceActiveTimeRanges + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDeviceCpuInfo: CPU specs for a CPU. +type ChromeOsDeviceCpuInfo struct { + // Architecture: The CPU architecture. + Architecture string `json:"architecture,omitempty"` + + // LogicalCpus: Information for the Logical CPUs + LogicalCpus []*ChromeOsDeviceCpuInfoLogicalCpus `json:"logicalCpus,omitempty"` + + // MaxClockSpeedKhz: The max CPU clock speed in kHz. + MaxClockSpeedKhz int64 `json:"maxClockSpeedKhz,omitempty"` + + // Model: The CPU model name. + Model string `json:"model,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Architecture") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *ChromeOsDeviceCpuInfo) MarshalJSON() ([]byte, error) {
+ type NoMethod ChromeOsDeviceCpuInfo
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// ChromeOsDeviceCpuInfoLogicalCpus: Status of a single logical CPU.
+type ChromeOsDeviceCpuInfoLogicalCpus struct {
+ // CStates: C-States indicate the power consumption state of the CPU.
+ // For more information, see the documentation published by the CPU
+ // maker.
+ CStates []*ChromeOsDeviceCpuInfoLogicalCpusCStates `json:"cStates,omitempty"`
+
+ // CurrentScalingFrequencyKhz: Current frequency the CPU is running at.
+ CurrentScalingFrequencyKhz int64 `json:"currentScalingFrequencyKhz,omitempty"`
+
+ // IdleDuration: Idle time since last boot.
+ IdleDuration string `json:"idleDuration,omitempty"`
+
+ // MaxScalingFrequencyKhz: Maximum frequency the CPU is allowed to run
+ // at, by policy.
+ MaxScalingFrequencyKhz int64 `json:"maxScalingFrequencyKhz,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CStates") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "CStates") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *ChromeOsDeviceCpuInfoLogicalCpus) MarshalJSON() ([]byte, error) {
+ type NoMethod ChromeOsDeviceCpuInfoLogicalCpus
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// ChromeOsDeviceCpuInfoLogicalCpusCStates: Status of a single C-state.
+// C-states are various modes the CPU can transition to in order to use
+// more or less power.
+type ChromeOsDeviceCpuInfoLogicalCpusCStates struct {
+ // DisplayName: Name of the state.
+ DisplayName string `json:"displayName,omitempty"`
+
+ // SessionDuration: Time spent in the state since the last reboot.
+ SessionDuration string `json:"sessionDuration,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "DisplayName") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "DisplayName") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *ChromeOsDeviceCpuInfoLogicalCpusCStates) MarshalJSON() ([]byte, error) {
+ type NoMethod ChromeOsDeviceCpuInfoLogicalCpusCStates
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type ChromeOsDeviceCpuStatusReports struct {
+ // CpuTemperatureInfo: A list of CPU temperature samples.
+ CpuTemperatureInfo []*ChromeOsDeviceCpuStatusReportsCpuTemperatureInfo `json:"cpuTemperatureInfo,omitempty"`
+
+ CpuUtilizationPercentageInfo []int64 `json:"cpuUtilizationPercentageInfo,omitempty"`
+
+ // ReportTime: Date and time the report was received.
+ ReportTime string `json:"reportTime,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "CpuTemperatureInfo")
+ // to unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "CpuTemperatureInfo") to
+ // include in API requests with the JSON null value. By default, fields
+ // with empty values are omitted from API requests. However, any field
+ // with an empty value appearing in NullFields will be sent to the
+ // server as null. It is an error if a field in this list has a
+ // non-empty value. This may be used to include null fields in Patch
+ // requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *ChromeOsDeviceCpuStatusReports) MarshalJSON() ([]byte, error) {
+ type NoMethod ChromeOsDeviceCpuStatusReports
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type ChromeOsDeviceCpuStatusReportsCpuTemperatureInfo struct {
+ // Label: CPU label.
+ Label string `json:"label,omitempty"`
+
+ // Temperature: Temperature in degrees Celsius.
+ Temperature int64 `json:"temperature,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Label") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Label") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
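+ // As an illustrative sketch of the NullFields mechanism described
+ // above, a Patch body that explicitly nulls a field could be built as:
+ //
+ //  info := &ChromeOsDeviceCpuStatusReportsCpuTemperatureInfo{}
+ //  info.NullFields = []string{"Label"}
+ //  // info now marshals as {"label":null}.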
+ NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceCpuStatusReportsCpuTemperatureInfo) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceCpuStatusReportsCpuTemperatureInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceDeviceFiles struct { + // CreateTime: Date and time the file was created + CreateTime string `json:"createTime,omitempty"` + + // DownloadUrl: File download URL + DownloadUrl string `json:"downloadUrl,omitempty"` + + // Name: File name + Name string `json:"name,omitempty"` + + // Type: File type + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceDeviceFiles) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceDeviceFiles + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceDiskVolumeReports struct { + // VolumeInfo: Disk volumes + VolumeInfo []*ChromeOsDeviceDiskVolumeReportsVolumeInfo `json:"volumeInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VolumeInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VolumeInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceDiskVolumeReports) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceDiskVolumeReports + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceDiskVolumeReportsVolumeInfo struct { + // StorageFree: Free disk space [in bytes] + StorageFree int64 `json:"storageFree,omitempty,string"` + + // StorageTotal: Total disk space [in bytes] + StorageTotal int64 `json:"storageTotal,omitempty,string"` + + // VolumeId: Volume id + VolumeId string `json:"volumeId,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"StorageFree") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageFree") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceDiskVolumeReportsVolumeInfo) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceDiskVolumeReportsVolumeInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDeviceLastKnownNetwork: Information for an ip address. +type ChromeOsDeviceLastKnownNetwork struct { + // IpAddress: The IP address. + IpAddress string `json:"ipAddress,omitempty"` + + // WanIpAddress: The WAN IP address. + WanIpAddress string `json:"wanIpAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpAddress") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpAddress") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceLastKnownNetwork) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceLastKnownNetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDeviceRecentUsers: A list of recent device users, in +// descending order, by last login time. +type ChromeOsDeviceRecentUsers struct { + // Email: The user's email address. This is only present if the user + // type is `USER_TYPE_MANAGED`. + Email string `json:"email,omitempty"` + + // Type: The type of the user. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceRecentUsers) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceRecentUsers + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceScreenshotFiles struct { + // CreateTime: Date and time the file was created + CreateTime string `json:"createTime,omitempty"` + + // DownloadUrl: File download URL + DownloadUrl string `json:"downloadUrl,omitempty"` + + // Name: File name + Name string `json:"name,omitempty"` + + // Type: File type + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceScreenshotFiles) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceScreenshotFiles + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDeviceSystemRamFreeReports struct { + // ReportTime: Date and time the report was received. + ReportTime string `json:"reportTime,omitempty"` + + SystemRamFreeInfo googleapi.Int64s `json:"systemRamFreeInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ReportTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReportTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
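+ // SystemRamFreeInfo above is a googleapi.Int64s, a []int64 that
+ // unmarshals from the string-encoded integers the API returns. A
+ // hedged sketch:
+ //
+ //  var v googleapi.Int64s
+ //  _ = json.Unmarshal([]byte(`["1024","2048"]`), &v)
+ //  // v == googleapi.Int64s{1024, 2048}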
+ NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceSystemRamFreeReports) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceSystemRamFreeReports + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDeviceTpmVersionInfo: Trusted Platform Module (TPM) +// (Read-only) +type ChromeOsDeviceTpmVersionInfo struct { + // Family: TPM family. We use the TPM 2.0 style encoding, e.g.: TPM 1.2: + // "1.2" -> 312e3200 TPM 2.0: "2.0" -> 322e3000 + Family string `json:"family,omitempty"` + + // FirmwareVersion: TPM firmware version. + FirmwareVersion string `json:"firmwareVersion,omitempty"` + + // Manufacturer: TPM manufacturer code. + Manufacturer string `json:"manufacturer,omitempty"` + + // SpecLevel: TPM specification level. See Library Specification for TPM + // 2.0 and Main Specification for TPM 1.2. + SpecLevel string `json:"specLevel,omitempty"` + + // TpmModel: TPM model number. + TpmModel string `json:"tpmModel,omitempty"` + + // VendorSpecific: Vendor-specific information such as Vendor ID. + VendorSpecific string `json:"vendorSpecific,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Family") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Family") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceTpmVersionInfo) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceTpmVersionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChromeOsDeviceAction: Data about an update to the status of a Chrome +// OS device. +type ChromeOsDeviceAction struct { + // Action: Action to be taken on the Chrome OS device. + Action string `json:"action,omitempty"` + + // DeprovisionReason: Only used when the action is `deprovision`. With + // the `deprovision` action, this field is required. *Note*: The + // deprovision reason is audited because it might have implications on + // licenses for perpetual subscription customers. + DeprovisionReason string `json:"deprovisionReason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDeviceAction) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDeviceAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsDevices struct { + // Chromeosdevices: A list of Chrome OS Device objects. + Chromeosdevices []*ChromeOsDevice `json:"chromeosdevices,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // NextPageToken: Token used to access the next page of this result. To + // access the next page, use this token's value in the `pageToken` query + // string of this request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Chromeosdevices") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Chromeosdevices") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ChromeOsDevices) MarshalJSON() ([]byte, error) { + type NoMethod ChromeOsDevices + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ChromeOsMoveDevicesToOu struct { + // DeviceIds: Chrome OS devices to be moved to OU + DeviceIds []string `json:"deviceIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeviceIds") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeviceIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
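+ // An illustrative sketch of building this request body (the device IDs
+ // are hypothetical; the call that sends the body is not shown here):
+ //
+ //  move := &ChromeOsMoveDevicesToOu{
+ //      DeviceIds: []string{"device-id-1", "device-id-2"},
+ //  }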
+ NullFields []string `json:"-"`
+}
+
+func (s *ChromeOsMoveDevicesToOu) MarshalJSON() ([]byte, error) {
+ type NoMethod ChromeOsMoveDevicesToOu
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// CreatePrintServerRequest: Request for adding a new print server.
+type CreatePrintServerRequest struct {
+ // Parent: Required. The unique ID
+ // (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers)
+ // of the customer's Google Workspace account. Format: `customers/{id}`
+ Parent string `json:"parent,omitempty"`
+
+ // PrintServer: Required. A print server to create. If you want to place
+ // the print server under a specific organizational unit (OU), then
+ // populate the `org_unit_id`. Otherwise the print server is created
+ // under the root OU. The `org_unit_id` can be retrieved using the
+ // Directory API
+ // (https://developers.google.com/admin-sdk/directory/v1/guides/manage-org-units).
+ PrintServer *PrintServer `json:"printServer,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Parent") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Parent") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *CreatePrintServerRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod CreatePrintServerRequest
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// CreatePrinterRequest: Request for adding a new printer.
+type CreatePrinterRequest struct {
+ // Parent: Required. The name of the customer. Format:
+ // customers/{customer_id}
+ Parent string `json:"parent,omitempty"`
+
+ // Printer: Required. A printer to create. If you want to place the
+ // printer under a particular OU, then populate the printer.org_unit_id
+ // field. Otherwise the printer will be placed under the root OU.
+ Printer *Printer `json:"printer,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Parent") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Parent") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
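+ // A hedged sketch of the request described above; the customer ID is
+ // hypothetical and `printer` is an assumed *Printer with org_unit_id
+ // populated as the Printer comment explains:
+ //
+ //  req := &CreatePrinterRequest{
+ //      Parent:  "customers/C012345",
+ //      Printer: printer,
+ //  }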
+ NullFields []string `json:"-"` +} + +func (s *CreatePrinterRequest) MarshalJSON() ([]byte, error) { + type NoMethod CreatePrinterRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Customer struct { + // AlternateEmail: The customer's secondary contact email address. This + // email address cannot be on the same domain as the `customerDomain` + AlternateEmail string `json:"alternateEmail,omitempty"` + + // CustomerCreationTime: The customer's creation time (Readonly) + CustomerCreationTime string `json:"customerCreationTime,omitempty"` + + // CustomerDomain: The customer's primary domain name string. Do not + // include the `www` prefix when creating a new customer. + CustomerDomain string `json:"customerDomain,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Id: The unique ID for the customer's Google Workspace account. + // (Readonly) + Id string `json:"id,omitempty"` + + // Kind: Identifies the resource as a customer. Value: + // `admin#directory#customer` + Kind string `json:"kind,omitempty"` + + // Language: The customer's ISO 639-2 language code. See the Language + // Codes (/admin-sdk/directory/v1/languages) page for the list of + // supported codes. Valid language codes outside the supported set will + // be accepted by the API but may lead to unexpected behavior. The + // default value is `en`. + Language string `json:"language,omitempty"` + + // PhoneNumber: The customer's contact phone number in E.164 + // (https://en.wikipedia.org/wiki/E.164) format. + PhoneNumber string `json:"phoneNumber,omitempty"` + + // PostalAddress: The customer's postal address information. + PostalAddress *CustomerPostalAddress `json:"postalAddress,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AlternateEmail") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AlternateEmail") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Customer) MarshalJSON() ([]byte, error) { + type NoMethod Customer + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CustomerPostalAddress struct { + // AddressLine1: A customer's physical address. The address can be + // composed of one to three lines. + AddressLine1 string `json:"addressLine1,omitempty"` + + // AddressLine2: Address line 2 of the address. + AddressLine2 string `json:"addressLine2,omitempty"` + + // AddressLine3: Address line 3 of the address. + AddressLine3 string `json:"addressLine3,omitempty"` + + // ContactName: The customer contact's name. 
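+ //
+ // For illustration only (hypothetical values; a minimal sketch of a
+ // populated address, with every literal invented for the example):
+ //
+ //	addr := &CustomerPostalAddress{
+ //		AddressLine1:     "1600 Amphitheatre Parkway",
+ //		ContactName:      "Jane Admin",
+ //		CountryCode:      "US",
+ //		Locality:         "Mountain View",
+ //		OrganizationName: "Example Org",
+ //		PostalCode:       "94043",
+ //		Region:           "CA",
+ //	}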
+ ContactName string `json:"contactName,omitempty"`
+
+ // CountryCode: This is a required property. For `countryCode`
+ // information see the ISO 3166 country code elements
+ // (https://www.iso.org/iso/country_codes.htm).
+ CountryCode string `json:"countryCode,omitempty"`
+
+ // Locality: Name of the locality. An example of a locality value is the
+ // city of `San Francisco`.
+ Locality string `json:"locality,omitempty"`
+
+ // OrganizationName: The company or company division name.
+ OrganizationName string `json:"organizationName,omitempty"`
+
+ // PostalCode: The postal code. A postalCode example is a postal zip
+ // code such as `10009`. This is in accordance with
+ // http://portablecontacts.net/draft-spec.html#address_element.
+ PostalCode string `json:"postalCode,omitempty"`
+
+ // Region: Name of the region. An example of a region value is `NY` for
+ // the state of New York.
+ Region string `json:"region,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "AddressLine1") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "AddressLine1") to include
+ // in API requests with the JSON null value. By default, fields with
+ // empty values are omitted from API requests. However, any field with
+ // an empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *CustomerPostalAddress) MarshalJSON() ([]byte, error) {
+ type NoMethod CustomerPostalAddress
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// DirectoryChromeosdevicesCommand: Information regarding a command that
+// was issued to a device.
+type DirectoryChromeosdevicesCommand struct {
+ // CommandExpireTime: The time at which the command will expire. If the
+ // device doesn't execute the command within this time the command will
+ // become expired.
+ CommandExpireTime string `json:"commandExpireTime,omitempty"`
+
+ // CommandId: Unique ID of a device command.
+ CommandId int64 `json:"commandId,omitempty,string"`
+
+ // CommandResult: The result of the command execution.
+ CommandResult *DirectoryChromeosdevicesCommandResult `json:"commandResult,omitempty"`
+
+ // IssueTime: The timestamp when the command was issued by the admin.
+ IssueTime string `json:"issueTime,omitempty"`
+
+ // Payload: The payload that the command specified, if any.
+ Payload string `json:"payload,omitempty"`
+
+ // State: Indicates the command state.
+ //
+ // Possible values:
+ // "STATE_UNSPECIFIED" - The command status was unspecified.
+ // "PENDING" - An unexpired command not yet sent to the client.
+ // "EXPIRED" - The command didn't get executed by the client within
+ // the expected time.
+ // "CANCELLED" - The command is cancelled by the admin while in PENDING.
+ // "SENT_TO_CLIENT" - The command has been sent to the client.
+ // "ACKED_BY_CLIENT" - The client has responded that it received the
+ // command.
+ // "EXECUTED_BY_CLIENT" - The client has (un)successfully executed the
+ // command.
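+ //
+ // For illustration only (a hypothetical polling sketch; `cmd` stands in
+ // for a *DirectoryChromeosdevicesCommand fetched from the API):
+ //
+ //	switch cmd.State {
+ //	case "PENDING", "SENT_TO_CLIENT", "ACKED_BY_CLIENT":
+ //		// still in flight; keep polling
+ //	case "EXPIRED", "CANCELLED", "EXECUTED_BY_CLIENT":
+ //		// terminal; inspect cmd.CommandResult if present
+ //	}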
+ State string `json:"state,omitempty"` + + // Type: The type of the command. + // + // Possible values: + // "COMMAND_TYPE_UNSPECIFIED" - The command type was unspecified. + // "REBOOT" - Reboot the device. Can only be issued to Kiosk and + // managed guest session devices. + // "TAKE_A_SCREENSHOT" - Take a screenshot of the device. Only + // available if the device is in Kiosk Mode. + // "SET_VOLUME" - Set the volume of the device. Can only be issued to + // Kiosk and managed guest session devices. + // "WIPE_USERS" - Wipe all the users off of the device. Executing this + // command in the device will remove all user profile data, but it will + // keep device policy and enrollment. + // "REMOTE_POWERWASH" - Wipes the device by performing a power wash. + // Executing this command in the device will remove all data including + // user policies, device policies and enrollment policies. Warning: This + // will revert the device back to a factory state with no enrollment + // unless the device is subject to forced or auto enrollment. Use with + // caution, as this is an irreversible action! + // "DEVICE_START_CRD_SESSION" - Starts a Chrome Remote Desktop + // session. + // "CAPTURE_LOGS" - Capture the system logs of a kiosk device. The + // logs can be downloaded from the downloadUrl link present in + // `deviceFiles` field of + // [chromeosdevices](https://developers.google.com/admin-sdk/directory/re + // ference/rest/v1/chromeosdevices) + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CommandExpireTime") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommandExpireTime") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DirectoryChromeosdevicesCommand) MarshalJSON() ([]byte, error) { + type NoMethod DirectoryChromeosdevicesCommand + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DirectoryChromeosdevicesCommandResult: The result of executing a +// command. +type DirectoryChromeosdevicesCommandResult struct { + // CommandResultPayload: The payload for the command result. The + // following commands respond with a payload: * + // `DEVICE_START_CRD_SESSION`: Payload is a stringified JSON object in + // the form: { "url": url }. The URL provides a link to the Chrome + // Remote Desktop session. + CommandResultPayload string `json:"commandResultPayload,omitempty"` + + // ErrorMessage: The error message with a short explanation as to why + // the command failed. Only present if the command failed. 
+ ErrorMessage string `json:"errorMessage,omitempty"` + + // ExecuteTime: The time at which the command was executed or failed to + // execute. + ExecuteTime string `json:"executeTime,omitempty"` + + // Result: The result of the command. + // + // Possible values: + // "COMMAND_RESULT_TYPE_UNSPECIFIED" - The command result was + // unspecified. + // "IGNORED" - The command was ignored as obsolete. + // "FAILURE" - The command could not be executed successfully. + // "SUCCESS" - The command was successfully executed. + Result string `json:"result,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CommandResultPayload") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommandResultPayload") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DirectoryChromeosdevicesCommandResult) MarshalJSON() ([]byte, error) { + type NoMethod DirectoryChromeosdevicesCommandResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DirectoryChromeosdevicesIssueCommandRequest: A request for issuing a +// command. +type DirectoryChromeosdevicesIssueCommandRequest struct { + // CommandType: The type of command. + // + // Possible values: + // "COMMAND_TYPE_UNSPECIFIED" - The command type was unspecified. + // "REBOOT" - Reboot the device. Can only be issued to Kiosk and + // managed guest session devices. + // "TAKE_A_SCREENSHOT" - Take a screenshot of the device. Only + // available if the device is in Kiosk Mode. + // "SET_VOLUME" - Set the volume of the device. Can only be issued to + // Kiosk and managed guest session devices. + // "WIPE_USERS" - Wipe all the users off of the device. Executing this + // command in the device will remove all user profile data, but it will + // keep device policy and enrollment. + // "REMOTE_POWERWASH" - Wipes the device by performing a power wash. + // Executing this command in the device will remove all data including + // user policies, device policies and enrollment policies. Warning: This + // will revert the device back to a factory state with no enrollment + // unless the device is subject to forced or auto enrollment. Use with + // caution, as this is an irreversible action! + // "DEVICE_START_CRD_SESSION" - Starts a Chrome Remote Desktop + // session. + // "CAPTURE_LOGS" - Capture the system logs of a kiosk device. The + // logs can be downloaded from the downloadUrl link present in + // `deviceFiles` field of + // [chromeosdevices](https://developers.google.com/admin-sdk/directory/re + // ference/rest/v1/chromeosdevices) + CommandType string `json:"commandType,omitempty"` + + // Payload: The payload for the command, provide it only if command + // supports it. 
The following commands support adding payload: * + // `SET_VOLUME`: Payload is a stringified JSON object in the form: { + // "volume": 50 }. The volume has to be an integer in the range [0,100]. + // * `DEVICE_START_CRD_SESSION`: Payload is optionally a stringified + // JSON object in the form: { "ackedUserPresence": true }. + // `ackedUserPresence` is a boolean. By default, `ackedUserPresence` is + // set to `false`. To start a Chrome Remote Desktop session for an + // active device, set `ackedUserPresence` to `true`. + Payload string `json:"payload,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CommandType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommandType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DirectoryChromeosdevicesIssueCommandRequest) MarshalJSON() ([]byte, error) { + type NoMethod DirectoryChromeosdevicesIssueCommandRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DirectoryChromeosdevicesIssueCommandResponse: A response for issuing +// a command. +type DirectoryChromeosdevicesIssueCommandResponse struct { + // CommandId: The unique ID of the issued command, used to retrieve the + // command status. + CommandId int64 `json:"commandId,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CommandId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommandId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DirectoryChromeosdevicesIssueCommandResponse) MarshalJSON() ([]byte, error) { + type NoMethod DirectoryChromeosdevicesIssueCommandResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DomainAlias struct { + // CreationTime: The creation time of the domain alias. (Read-only). + CreationTime int64 `json:"creationTime,omitempty,string"` + + // DomainAliasName: The domain alias name. 
+ DomainAliasName string `json:"domainAliasName,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // ParentDomainName: The parent domain name that the domain alias is + // associated with. This can either be a primary or secondary domain + // name within a customer. + ParentDomainName string `json:"parentDomainName,omitempty"` + + // Verified: Indicates the verification state of a domain alias. + // (Read-only) + Verified bool `json:"verified,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DomainAlias) MarshalJSON() ([]byte, error) { + type NoMethod DomainAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DomainAliases struct { + // DomainAliases: A list of domain alias objects. + DomainAliases []*DomainAlias `json:"domainAliases,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DomainAliases") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DomainAliases") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DomainAliases) MarshalJSON() ([]byte, error) { + type NoMethod DomainAliases + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Domains struct { + // CreationTime: Creation time of the domain. Expressed in Unix time + // (https://en.wikipedia.org/wiki/Epoch_time) format. (Read-only). 
+ CreationTime int64 `json:"creationTime,omitempty,string"` + + // DomainAliases: A list of domain alias objects. (Read-only) + DomainAliases []*DomainAlias `json:"domainAliases,omitempty"` + + // DomainName: The domain name of the customer. + DomainName string `json:"domainName,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // IsPrimary: Indicates if the domain is a primary domain (Read-only). + IsPrimary bool `json:"isPrimary,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // Verified: Indicates the verification state of a domain. (Read-only). + Verified bool `json:"verified,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Domains) MarshalJSON() ([]byte, error) { + type NoMethod Domains + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Domains2 struct { + // Domains: A list of domain objects. + Domains []*Domains `json:"domains,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Domains") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Domains") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Domains2) MarshalJSON() ([]byte, error) { + type NoMethod Domains2 + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. 
A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// FailureInfo: Info about failures +type FailureInfo struct { + // ErrorCode: Canonical code for why the update failed to apply. + // + // Possible values: + // "OK" - Not an error; returned on success. HTTP Mapping: 200 OK + // "CANCELLED" - The operation was cancelled, typically by the caller. + // HTTP Mapping: 499 Client Closed Request + // "UNKNOWN" - Unknown error. For example, this error may be returned + // when a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also errors + // raised by APIs that do not return enough error information may be + // converted to this error. HTTP Mapping: 500 Internal Server Error + // "INVALID_ARGUMENT" - The client specified an invalid argument. Note + // that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` + // indicates arguments that are problematic regardless of the state of + // the system (e.g., a malformed file name). HTTP Mapping: 400 Bad + // Request + // "DEADLINE_EXCEEDED" - The deadline expired before the operation + // could complete. For operations that change the state of the system, + // this error may be returned even if the operation has completed + // successfully. For example, a successful response from a server could + // have been delayed long enough for the deadline to expire. HTTP + // Mapping: 504 Gateway Timeout + // "NOT_FOUND" - Some requested entity (e.g., file or directory) was + // not found. Note to server developers: if a request is denied for an + // entire class of users, such as gradual feature rollout or + // undocumented allowlist, `NOT_FOUND` may be used. If a request is + // denied for some users within a class of users, such as user-based + // access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 + // Not Found + // "ALREADY_EXISTS" - The entity that a client attempted to create + // (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict + // "PERMISSION_DENIED" - The caller does not have permission to + // execute the specified operation. `PERMISSION_DENIED` must not be used + // for rejections caused by exhausting some resource (use + // `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` + // must not be used if the caller can not be identified (use + // `UNAUTHENTICATED` instead for those errors). This error code does not + // imply the request is valid or the requested entity exists or + // satisfies other pre-conditions. HTTP Mapping: 403 Forbidden + // "UNAUTHENTICATED" - The request does not have valid authentication + // credentials for the operation. HTTP Mapping: 401 Unauthorized + // "RESOURCE_EXHAUSTED" - Some resource has been exhausted, perhaps a + // per-user quota, or perhaps the entire file system is out of space. + // HTTP Mapping: 429 Too Many Requests + // "FAILED_PRECONDITION" - The operation was rejected because the + // system is not in a state required for the operation's execution. For + // example, the directory to be deleted is non-empty, an rmdir operation + // is applied to a non-directory, etc. 
Service implementors can use the + // following guidelines to decide between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can + // retry just the failing call. (b) Use `ABORTED` if the client should + // retry at a higher level. For example, when a client-specified + // test-and-set fails, indicating the client should restart a + // read-modify-write sequence. (c) Use `FAILED_PRECONDITION` if the + // client should not retry until the system state has been explicitly + // fixed. For example, if an "rmdir" fails because the directory is + // non-empty, `FAILED_PRECONDITION` should be returned since the client + // should not retry unless the files are deleted from the directory. + // HTTP Mapping: 400 Bad Request + // "ABORTED" - The operation was aborted, typically due to a + // concurrency issue such as a sequencer check failure or transaction + // abort. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 409 Conflict + // "OUT_OF_RANGE" - The operation was attempted past the valid range. + // E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, + // this error indicates a problem that may be fixed if the system state + // changes. For example, a 32-bit file system will generate + // `INVALID_ARGUMENT` if asked to read at an offset that is not in the + // range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to + // read from an offset past the current file size. There is a fair bit + // of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We + // recommend using `OUT_OF_RANGE` (the more specific error) when it + // applies so that callers who are iterating through a space can easily + // look for an `OUT_OF_RANGE` error to detect when they are done. HTTP + // Mapping: 400 Bad Request + // "UNIMPLEMENTED" - The operation is not implemented or is not + // supported/enabled in this service. HTTP Mapping: 501 Not Implemented + // "INTERNAL" - Internal errors. This means that some invariants + // expected by the underlying system have been broken. This error code + // is reserved for serious errors. HTTP Mapping: 500 Internal Server + // Error + // "UNAVAILABLE" - The service is currently unavailable. This is most + // likely a transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry non-idempotent + // operations. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 503 Service Unavailable + // "DATA_LOSS" - Unrecoverable data loss or corruption. HTTP Mapping: + // 500 Internal Server Error + ErrorCode string `json:"errorCode,omitempty"` + + // ErrorMessage: Failure reason message. + ErrorMessage string `json:"errorMessage,omitempty"` + + // Printer: Failed printer. + Printer *Printer `json:"printer,omitempty"` + + // PrinterId: Id of a failed printer. + PrinterId string `json:"printerId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorCode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ErrorCode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FailureInfo) MarshalJSON() ([]byte, error) { + type NoMethod FailureInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Feature: JSON template for Feature object in Directory API. +type Feature struct { + // Etags: ETag of the resource. + Etags string `json:"etags,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // Name: The name of the feature. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etags") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etags") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Feature) MarshalJSON() ([]byte, error) { + type NoMethod Feature + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FeatureInstance: JSON template for a feature instance. +type FeatureInstance struct { + // Feature: The feature that this is an instance of. A calendar resource + // may have multiple instances of a feature. + Feature *Feature `json:"feature,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Feature") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Feature") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *FeatureInstance) MarshalJSON() ([]byte, error) { + type NoMethod FeatureInstance + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FeatureRename struct { + // NewName: New name of the feature. + NewName string `json:"newName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NewName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NewName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FeatureRename) MarshalJSON() ([]byte, error) { + type NoMethod FeatureRename + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Features: Public API: Resources.features +type Features struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Features: The Features in this page of results. + Features []*Feature `json:"features,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Features) MarshalJSON() ([]byte, error) { + type NoMethod Features + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Group: Google Groups provide your users the ability to send messages +// to groups of people using the group's email address. For more +// information about common tasks, see the Developer's Guide +// (https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups). 
+// For information about other types of groups, see the Cloud Identity +// Groups API documentation +// (https://cloud.google.com/identity/docs/groups). Note: The user +// calling the API (or being impersonated by a service account) must +// have an assigned role +// (https://developers.google.com/admin-sdk/directory/v1/guides/manage-roles) +// that includes Admin API Groups permissions, such as Super Admin or +// Groups Admin. +type Group struct { + // AdminCreated: Read-only. Value is `true` if this group was created by + // an administrator rather than a user. + AdminCreated bool `json:"adminCreated,omitempty"` + + // Aliases: Read-only. The list of a group's alias email addresses. To + // add, update, or remove a group's aliases, use the `groups.aliases` + // methods. If edited in a group's POST or PUT request, the edit is + // ignored. + Aliases []string `json:"aliases,omitempty"` + + // Description: An extended description to help users determine the + // purpose of a group. For example, you can include information about + // who should join the group, the types of messages to send to the + // group, links to FAQs about the group, or related groups. Maximum + // length is `4,096` characters. + Description string `json:"description,omitempty"` + + // DirectMembersCount: The number of users that are direct members of + // the group. If a group is a member (child) of this group (the parent), + // members of the child group are not counted in the + // `directMembersCount` property of the parent group. + DirectMembersCount int64 `json:"directMembersCount,omitempty,string"` + + // Email: The group's email address. If your account has multiple + // domains, select the appropriate domain for the email address. The + // `email` must be unique. This property is required when creating a + // group. Group email addresses are subject to the same character usage + // rules as usernames, see the help center + // (https://support.google.com/a/answer/9193374) for details. + Email string `json:"email,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Id: Read-only. The unique ID of a group. A group `id` can be used as + // a group request URI's `groupKey`. + Id string `json:"id,omitempty"` + + // Kind: The type of the API resource. For Groups resources, the value + // is `admin#directory#group`. + Kind string `json:"kind,omitempty"` + + // Name: The group's display name. + Name string `json:"name,omitempty"` + + // NonEditableAliases: Read-only. The list of the group's non-editable + // alias email addresses that are outside of the account's primary + // domain or subdomains. These are functioning email addresses used by + // the group. This is a read-only property returned in the API's + // response for a group. If edited in a group's POST or PUT request, the + // edit is ignored. + NonEditableAliases []string `json:"nonEditableAliases,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdminCreated") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdminCreated") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Group) MarshalJSON() ([]byte, error) { + type NoMethod Group + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GroupAlias: The Directory API manages aliases, which are alternative +// email addresses. +type GroupAlias struct { + // Alias: The alias email address. + Alias string `json:"alias,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Id: The unique ID of the group. + Id string `json:"id,omitempty"` + + // Kind: The type of the API resource. For Alias resources, the value is + // `admin#directory#alias`. + Kind string `json:"kind,omitempty"` + + // PrimaryEmail: The primary email address of the group. + PrimaryEmail string `json:"primaryEmail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alias") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alias") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GroupAlias) MarshalJSON() ([]byte, error) { + type NoMethod GroupAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Groups struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Groups: A list of group objects. + Groups []*Group `json:"groups,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // NextPageToken: Token used to access next page of this result. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Groups) MarshalJSON() ([]byte, error) { + type NoMethod Groups + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ListPrintServersResponse struct { + // NextPageToken: A token that can be sent as `page_token` in a request + // to retrieve the next page. If this field is omitted, there are no + // subsequent pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // PrintServers: List of print servers. + PrintServers []*PrintServer `json:"printServers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListPrintServersResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListPrintServersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListPrinterModelsResponse: Response for listing allowed printer +// models. +type ListPrinterModelsResponse struct { + // NextPageToken: A token, which can be sent as `page_token` to retrieve + // the next page. If this field is omitted, there are no subsequent + // pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // PrinterModels: Printer models that are currently allowed to be + // configured for ChromeOs. Some printers may be added or removed over + // time. + PrinterModels []*PrinterModel `json:"printerModels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListPrinterModelsResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListPrinterModelsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListPrintersResponse: Response for listing printers. +type ListPrintersResponse struct { + // NextPageToken: A token, which can be sent as `page_token` to retrieve + // the next page. If this field is omitted, there are no subsequent + // pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Printers: List of printers. If `org_unit_id` was given in the + // request, then only printers visible for this OU will be returned. If + // `org_unit_id` was not given in the request, then all printers will be + // returned. + Printers []*Printer `json:"printers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListPrintersResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListPrintersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Member: A Google Groups member can be a user or another group. This +// member can be inside or outside of your account's domains. For more +// information about common group member tasks, see the Developer's +// Guide (/admin-sdk/directory/v1/guides/manage-group-members). +type Member struct { + // DeliverySettings: Defines mail delivery preferences of member. This + // field is only supported by `insert`, `update`, and `get` methods. + DeliverySettings string `json:"delivery_settings,omitempty"` + + // Email: The member's email address. A member can be a user or another + // group. This property is required when adding a member to a group. The + // `email` must be unique and cannot be an alias of another group. If + // the email address is changed, the API automatically reflects the + // email address changes. + Email string `json:"email,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Id: The unique ID of the group member. A member `id` can be used as a + // member request URI's `memberKey`. + Id string `json:"id,omitempty"` + + // Kind: The type of the API resource. For Members resources, the value + // is `admin#directory#member`. + Kind string `json:"kind,omitempty"` + + // Role: The member's role in a group. 
The API returns an error for + // cycles in group memberships. For example, if `group1` is a member of + // `group2`, `group2` cannot be a member of `group1`. For more + // information about a member's role, see the administration help center + // (https://support.google.com/a/answer/167094). + Role string `json:"role,omitempty"` + + // Status: Status of member (Immutable) + Status string `json:"status,omitempty"` + + // Type: The type of group member. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DeliverySettings") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeliverySettings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Member) MarshalJSON() ([]byte, error) { + type NoMethod Member + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Members struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // Members: A list of member objects. + Members []*Member `json:"members,omitempty"` + + // NextPageToken: Token used to access next page of this result. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Members) MarshalJSON() ([]byte, error) { + type NoMethod Members + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MembersHasMember: JSON template for Has Member response in Directory +// API. +type MembersHasMember struct { + // IsMember: Output only. Identifies whether the given user is a member + // of the group. 
Membership can be direct or nested.
+ IsMember bool `json:"isMember,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "IsMember") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "IsMember") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *MembersHasMember) MarshalJSON() ([]byte, error) {
+ type NoMethod MembersHasMember
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// MobileDevice: Google Workspace Mobile Management includes Android,
+// Google Sync (https://support.google.com/a/answer/135937), and iOS
+// devices. For more information about common group mobile device API
+// tasks, see the Developer's Guide
+// (/admin-sdk/directory/v1/guides/manage-mobile-devices.html).
+type MobileDevice struct {
+ // AdbStatus: Adb (USB debugging) enabled or disabled on device
+ // (Read-only)
+ AdbStatus bool `json:"adbStatus,omitempty"`
+
+ // Applications: The list of applications installed on an Android mobile
+ // device. It is not applicable to Google Sync and iOS devices. The list
+ // includes any Android applications that access Google Workspace data.
+ // When updating an applications list, it is important to note that
+ // updates replace the existing list. If the Android device has two
+ // existing applications and the API updates the list with five
+ // applications, the list is now the updated list of five applications.
+ Applications []*MobileDeviceApplications `json:"applications,omitempty"`
+
+ // BasebandVersion: The device's baseband version.
+ BasebandVersion string `json:"basebandVersion,omitempty"`
+
+ // BootloaderVersion: Mobile Device Bootloader version (Read-only)
+ BootloaderVersion string `json:"bootloaderVersion,omitempty"`
+
+ // Brand: Mobile Device Brand (Read-only)
+ Brand string `json:"brand,omitempty"`
+
+ // BuildNumber: The device's operating system build number.
+ BuildNumber string `json:"buildNumber,omitempty"`
+
+ // DefaultLanguage: The default locale used on the device.
+ DefaultLanguage string `json:"defaultLanguage,omitempty"`
+
+ // DeveloperOptionsStatus: Developer options enabled or disabled on
+ // device (Read-only)
+ DeveloperOptionsStatus bool `json:"developerOptionsStatus,omitempty"`
+
+ // DeviceCompromisedStatus: The compromised device status.
+ DeviceCompromisedStatus string `json:"deviceCompromisedStatus,omitempty"`
+
+ // DeviceId: The serial number for a Google Sync mobile device. For
+ // Android and iOS devices, this is a software generated unique
+ // identifier.
+ DeviceId string `json:"deviceId,omitempty"` + + // DevicePasswordStatus: DevicePasswordStatus (Read-only) + DevicePasswordStatus string `json:"devicePasswordStatus,omitempty"` + + // Email: The list of the owner's email addresses. If your application + // needs the current list of user emails, use the get + // (/admin-sdk/directory/v1/reference/mobiledevices/get.html) method. + // For additional information, see the retrieve a user + // (/admin-sdk/directory/v1/guides/manage-users#get_user) method. + Email []string `json:"email,omitempty"` + + // EncryptionStatus: Mobile Device Encryption Status (Read-only) + EncryptionStatus string `json:"encryptionStatus,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // FirstSync: Date and time the device was first synchronized with the + // policy settings in the G Suite administrator control panel + // (Read-only) + FirstSync string `json:"firstSync,omitempty"` + + // Hardware: Mobile Device Hardware (Read-only) + Hardware string `json:"hardware,omitempty"` + + // HardwareId: The IMEI/MEID unique identifier for Android hardware. It + // is not applicable to Google Sync devices. When adding an Android + // mobile device, this is an optional property. When updating one of + // these devices, this is a read-only property. + HardwareId string `json:"hardwareId,omitempty"` + + // Imei: The device's IMEI number. + Imei string `json:"imei,omitempty"` + + // KernelVersion: The device's kernel version. + KernelVersion string `json:"kernelVersion,omitempty"` + + // Kind: The type of the API resource. For Mobiledevices resources, the + // value is `admin#directory#mobiledevice`. + Kind string `json:"kind,omitempty"` + + // LastSync: Date and time the device was last synchronized with the + // policy settings in the G Suite administrator control panel + // (Read-only) + LastSync string `json:"lastSync,omitempty"` + + // ManagedAccountIsOnOwnerProfile: Boolean indicating if this account is + // on owner/primary profile or not. + ManagedAccountIsOnOwnerProfile bool `json:"managedAccountIsOnOwnerProfile,omitempty"` + + // Manufacturer: Mobile Device manufacturer (Read-only) + Manufacturer string `json:"manufacturer,omitempty"` + + // Meid: The device's MEID number. + Meid string `json:"meid,omitempty"` + + // Model: The mobile device's model name, for example Nexus S. This + // property can be updated + // (/admin-sdk/directory/v1/reference/mobiledevices/update.html). For + // more information, see the Developer's Guide + // (/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_de + // vice). + Model string `json:"model,omitempty"` + + // Name: The list of the owner's user names. If your application needs + // the current list of device owner names, use the get + // (/admin-sdk/directory/v1/reference/mobiledevices/get.html) method. + // For more information about retrieving mobile device user information, + // see the Developer's Guide + // (/admin-sdk/directory/v1/guides/manage-users#get_user). + Name []string `json:"name,omitempty"` + + // NetworkOperator: Mobile Device mobile or network operator (if + // available) (Read-only) + NetworkOperator string `json:"networkOperator,omitempty"` + + // Os: The mobile device's operating system, for example IOS 4.3 or + // Android 2.3.5. This property can be updated + // (/admin-sdk/directory/v1/reference/mobiledevices/update.html).
For + // more information, see the Developer's Guide + // (/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_de + // vice). + Os string `json:"os,omitempty"` + + // OtherAccountsInfo: The list of accounts added on device (Read-only) + OtherAccountsInfo []string `json:"otherAccountsInfo,omitempty"` + + // Privilege: DMAgentPermission (Read-only) + Privilege string `json:"privilege,omitempty"` + + // ReleaseVersion: Mobile Device release version (Read-only) + ReleaseVersion string `json:"releaseVersion,omitempty"` + + // ResourceId: The unique ID the API service uses to identify the mobile + // device. + ResourceId string `json:"resourceId,omitempty"` + + // SecurityPatchLevel: Mobile Device Security patch level (Read-only) + SecurityPatchLevel int64 `json:"securityPatchLevel,omitempty,string"` + + // SerialNumber: The device's serial number. + SerialNumber string `json:"serialNumber,omitempty"` + + // Status: The device's status. + Status string `json:"status,omitempty"` + + // SupportsWorkProfile: Work profile supported on device (Read-only) + SupportsWorkProfile bool `json:"supportsWorkProfile,omitempty"` + + // Type: The type of mobile device. + Type string `json:"type,omitempty"` + + // UnknownSourcesStatus: Unknown sources enabled or disabled on device + // (Read-only) + UnknownSourcesStatus bool `json:"unknownSourcesStatus,omitempty"` + + // UserAgent: Gives information about the device such as `os` version. + // This property can be updated + // (/admin-sdk/directory/v1/reference/mobiledevices/update.html). For + // more information, see the Developer's Guide + // (/admin-sdk/directory/v1/guides/manage-mobile-devices#update_mobile_de + // vice). + UserAgent string `json:"userAgent,omitempty"` + + // WifiMacAddress: The device's MAC address on Wi-Fi networks. + WifiMacAddress string `json:"wifiMacAddress,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdbStatus") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdbStatus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MobileDevice) MarshalJSON() ([]byte, error) { + type NoMethod MobileDevice + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MobileDeviceApplications struct { + // DisplayName: The application's display name. An example is `Browser`. + DisplayName string `json:"displayName,omitempty"` + + // PackageName: The application's package name. An example is + // `com.android.browser`. + PackageName string `json:"packageName,omitempty"` + + // Permission: The list of permissions of this application.
These can be + // either standard Android permissions or ones defined by the + // application, and are found in an application's Android manifest + // (https://developer.android.com/guide/topics/manifest/uses-permission-element.html). + // Examples of a Calendar application's permissions are `READ_CALENDAR` + // or `MANAGE_ACCOUNTS`. + Permission []string `json:"permission,omitempty"` + + // VersionCode: The application's version code. An example is `13`. + VersionCode int64 `json:"versionCode,omitempty"` + + // VersionName: The application's version name. An example is + // `3.2-140714`. + VersionName string `json:"versionName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MobileDeviceApplications) MarshalJSON() ([]byte, error) { + type NoMethod MobileDeviceApplications + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MobileDeviceAction struct { + // Action: The action to be performed on the device. + Action string `json:"action,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MobileDeviceAction) MarshalJSON() ([]byte, error) { + type NoMethod MobileDeviceAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MobileDevices struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // Mobiledevices: A list of Mobile Device objects. + Mobiledevices []*MobileDevice `json:"mobiledevices,omitempty"` + + // NextPageToken: Token used to access next page of this result. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server.
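+ // Editor's note: a hedged sketch of paging through MobileDevices with
+ // NextPageToken, assuming the generated Mobiledevices service defined
+ // later in this package; srv and customerId are placeholders.
+ //
+ //	var all []*MobileDevice
+ //	call := srv.Mobiledevices.List(customerId)
+ //	for {
+ //		page, err := call.Do()
+ //		if err != nil {
+ //			log.Fatal(err)
+ //		}
+ //		all = append(all, page.Mobiledevices...)
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		call.PageToken(page.NextPageToken)
+ //	}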
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MobileDevices) MarshalJSON() ([]byte, error) { + type NoMethod MobileDevices + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OrgUnit: Managing your account's organizational units allows you to +// configure your users' access to services and custom settings. For +// more information about common organizational unit tasks, see the +// Developer's Guide +// (/admin-sdk/directory/v1/guides/manage-org-units.html). The +// customer's organizational unit hierarchy is limited to 35 levels of +// depth. +type OrgUnit struct { + // BlockInheritance: Determines if a sub-organizational unit can inherit + // the settings of the parent organization. The default value is + // `false`, meaning a sub-organizational unit inherits the settings of + // the nearest parent organizational unit. We recommend using the + // default value because setting `block_inheritance` to `true` can have + // _unintended consequences_. For more information about inheritance and + // users in an organization structure, see the administration help + // center (https://support.google.com/a/answer/4352075). + BlockInheritance bool `json:"blockInheritance,omitempty"` + + // Description: Description of the organizational unit. + Description string `json:"description,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the API resource. For Orgunits resources, the value + // is `admin#directory#orgUnit`. + Kind string `json:"kind,omitempty"` + + // Name: The organizational unit's path name. For example, an + // organizational unit's name within the /corp/support/sales_support + // parent path is sales_support. Required. + Name string `json:"name,omitempty"` + + // OrgUnitId: The unique ID of the organizational unit. + OrgUnitId string `json:"orgUnitId,omitempty"` + + // OrgUnitPath: The full path to the organizational unit. The + // `orgUnitPath` is a derived property. When listed, it is derived from + // `parentOrgunitPath` and organizational unit's `name`. For example, + // for an organizational unit named 'apps' under parent organization + // '/engineering', the orgUnitPath is '/engineering/apps'. In order to + // edit an `orgUnitPath`, either update the name of the organization or + // the `parentOrgunitPath`. A user's organizational unit determines + // which Google Workspace services the user has access to. If the user + // is moved to a new organization, the user's access changes. 
For more + // information about organization structures, see the administration + // help center (https://support.google.com/a/answer/4352075). For more + // information about moving a user to a different organization, see + // Update a user + // (/admin-sdk/directory/v1/guides/manage-users.html#update_user). + OrgUnitPath string `json:"orgUnitPath,omitempty"` + + // ParentOrgUnitId: The unique ID of the parent organizational unit. + // Required, unless `parentOrgUnitPath` is set. + ParentOrgUnitId string `json:"parentOrgUnitId,omitempty"` + + // ParentOrgUnitPath: The organizational unit's parent path. For + // example, /corp/sales is the parent path for /corp/sales/sales_support + // organizational unit. Required, unless `parentOrgUnitId` is set. + ParentOrgUnitPath string `json:"parentOrgUnitPath,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BlockInheritance") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BlockInheritance") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *OrgUnit) MarshalJSON() ([]byte, error) { + type NoMethod OrgUnit + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OrgUnits struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the API resource. For Org Unit resources, the type + // is `admin#directory#orgUnits`. + Kind string `json:"kind,omitempty"` + + // OrganizationUnits: A list of organizational unit objects. + OrganizationUnits []*OrgUnit `json:"organizationUnits,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
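+ // Editor's note: a minimal sketch of how ForceSendFields and NullFields
+ // shape the JSON produced by the MarshalJSON methods in this file; the
+ // OrgUnit values below are illustrative only.
+ //
+ //	ou := &OrgUnit{
+ //		Name: "sales_support",
+ //		// BlockInheritance is false (the zero value) and would normally
+ //		// be omitted; listing it emits "blockInheritance": false.
+ //		ForceSendFields: []string{"BlockInheritance"},
+ //		// Description is empty; listing it emits "description": null.
+ //		NullFields: []string{"Description"},
+ //	}
+ //	b, err := json.Marshal(ou) // routed through (*OrgUnit).MarshalJSON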
+ NullFields []string `json:"-"` +} + +func (s *OrgUnits) MarshalJSON() ([]byte, error) { + type NoMethod OrgUnits + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OsUpdateStatus: Contains information regarding the current OS update +// status. +type OsUpdateStatus struct { + // RebootTime: Date and time of the last reboot. + RebootTime string `json:"rebootTime,omitempty"` + + // State: The update state of an OS update. + // + // Possible values: + // "updateStateUnspecified" - The update state is unspecified. + // "updateStateNotStarted" - There is an update pending but it hasn't + // started. + // "updateStateDownloadInProgress" - The pending update is being + // downloaded. + // "updateStateNeedReboot" - The device is ready to install the + // update, but must reboot. + State string `json:"state,omitempty"` + + // TargetKioskAppVersion: New platform version required by the kiosk + // app's pending update. + TargetKioskAppVersion string `json:"targetKioskAppVersion,omitempty"` + + // TargetOsVersion: New platform version of the OS image being + // downloaded and applied. It is only set when update status is + // UPDATE_STATUS_DOWNLOAD_IN_PROGRESS or UPDATE_STATUS_NEED_REBOOT. Note + // this could be a dummy "0.0.0.0" for UPDATE_STATUS_NEED_REBOOT in + // some edge cases, e.g. when the update engine is restarted without a + // reboot. + TargetOsVersion string `json:"targetOsVersion,omitempty"` + + // UpdateCheckTime: Date and time of the last update check. + UpdateCheckTime string `json:"updateCheckTime,omitempty"` + + // UpdateTime: Date and time of the last successful OS update. + UpdateTime string `json:"updateTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RebootTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RebootTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OsUpdateStatus) MarshalJSON() ([]byte, error) { + type NoMethod OsUpdateStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PrintServer: Configuration for a print server. +type PrintServer struct { + // CreateTime: Output only. Time when the print server was created. + CreateTime string `json:"createTime,omitempty"` + + // Description: Editable. Description of the print server (as shown in + // the Admin console). + Description string `json:"description,omitempty"` + + // DisplayName: Editable. Display name of the print server (as shown in + // the Admin console). + DisplayName string `json:"displayName,omitempty"` + + // Id: Immutable. ID of the print server. Leave empty when creating. + Id string `json:"id,omitempty"` + + // Name: Immutable. Resource name of the print server. Leave empty when + // creating.
Format: + // `customers/{customer.id}/printServers/{print_server.id}` + Name string `json:"name,omitempty"` + + // OrgUnitId: ID of the organization unit (OU) that owns this print + // server. This value can only be set when the print server is initially + // created. If it's not populated, the print server is placed under the + // root OU. The `org_unit_id` can be retrieved using the Directory API + // (/admin-sdk/directory/reference/rest/v1/orgunits). + OrgUnitId string `json:"orgUnitId,omitempty"` + + // Uri: Editable. Print server URI. + Uri string `json:"uri,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PrintServer) MarshalJSON() ([]byte, error) { + type NoMethod PrintServer + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PrintServerFailureInfo: Info about failures +type PrintServerFailureInfo struct { + // ErrorCode: Canonical code for why the update failed to apply. + // + // Possible values: + // "OK" - Not an error; returned on success. HTTP Mapping: 200 OK + // "CANCELLED" - The operation was cancelled, typically by the caller. + // HTTP Mapping: 499 Client Closed Request + // "UNKNOWN" - Unknown error. For example, this error may be returned + // when a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also errors + // raised by APIs that do not return enough error information may be + // converted to this error. HTTP Mapping: 500 Internal Server Error + // "INVALID_ARGUMENT" - The client specified an invalid argument. Note + // that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` + // indicates arguments that are problematic regardless of the state of + // the system (e.g., a malformed file name). HTTP Mapping: 400 Bad + // Request + // "DEADLINE_EXCEEDED" - The deadline expired before the operation + // could complete. For operations that change the state of the system, + // this error may be returned even if the operation has completed + // successfully. For example, a successful response from a server could + // have been delayed long enough for the deadline to expire. HTTP + // Mapping: 504 Gateway Timeout + // "NOT_FOUND" - Some requested entity (e.g., file or directory) was + // not found. Note to server developers: if a request is denied for an + // entire class of users, such as gradual feature rollout or + // undocumented allowlist, `NOT_FOUND` may be used. 
If a request is + // denied for some users within a class of users, such as user-based + // access control, `PERMISSION_DENIED` must be used. HTTP Mapping: 404 + // Not Found + // "ALREADY_EXISTS" - The entity that a client attempted to create + // (e.g., file or directory) already exists. HTTP Mapping: 409 Conflict + // "PERMISSION_DENIED" - The caller does not have permission to + // execute the specified operation. `PERMISSION_DENIED` must not be used + // for rejections caused by exhausting some resource (use + // `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` + // must not be used if the caller can not be identified (use + // `UNAUTHENTICATED` instead for those errors). This error code does not + // imply the request is valid or the requested entity exists or + // satisfies other pre-conditions. HTTP Mapping: 403 Forbidden + // "UNAUTHENTICATED" - The request does not have valid authentication + // credentials for the operation. HTTP Mapping: 401 Unauthorized + // "RESOURCE_EXHAUSTED" - Some resource has been exhausted, perhaps a + // per-user quota, or perhaps the entire file system is out of space. + // HTTP Mapping: 429 Too Many Requests + // "FAILED_PRECONDITION" - The operation was rejected because the + // system is not in a state required for the operation's execution. For + // example, the directory to be deleted is non-empty, an rmdir operation + // is applied to a non-directory, etc. Service implementors can use the + // following guidelines to decide between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can + // retry just the failing call. (b) Use `ABORTED` if the client should + // retry at a higher level. For example, when a client-specified + // test-and-set fails, indicating the client should restart a + // read-modify-write sequence. (c) Use `FAILED_PRECONDITION` if the + // client should not retry until the system state has been explicitly + // fixed. For example, if an "rmdir" fails because the directory is + // non-empty, `FAILED_PRECONDITION` should be returned since the client + // should not retry unless the files are deleted from the directory. + // HTTP Mapping: 400 Bad Request + // "ABORTED" - The operation was aborted, typically due to a + // concurrency issue such as a sequencer check failure or transaction + // abort. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 409 Conflict + // "OUT_OF_RANGE" - The operation was attempted past the valid range. + // E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, + // this error indicates a problem that may be fixed if the system state + // changes. For example, a 32-bit file system will generate + // `INVALID_ARGUMENT` if asked to read at an offset that is not in the + // range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to + // read from an offset past the current file size. There is a fair bit + // of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We + // recommend using `OUT_OF_RANGE` (the more specific error) when it + // applies so that callers who are iterating through a space can easily + // look for an `OUT_OF_RANGE` error to detect when they are done. HTTP + // Mapping: 400 Bad Request + // "UNIMPLEMENTED" - The operation is not implemented or is not + // supported/enabled in this service. HTTP Mapping: 501 Not Implemented + // "INTERNAL" - Internal errors. 
This means that some invariants + // expected by the underlying system have been broken. This error code + // is reserved for serious errors. HTTP Mapping: 500 Internal Server + // Error + // "UNAVAILABLE" - The service is currently unavailable. This is most + // likely a transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry non-idempotent + // operations. See the guidelines above for deciding between + // `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. HTTP Mapping: + // 503 Service Unavailable + // "DATA_LOSS" - Unrecoverable data loss or corruption. HTTP Mapping: + // 500 Internal Server Error + ErrorCode string `json:"errorCode,omitempty"` + + // ErrorMessage: Failure reason message. + ErrorMessage string `json:"errorMessage,omitempty"` + + // PrintServer: Failed print server. + PrintServer *PrintServer `json:"printServer,omitempty"` + + // PrintServerId: ID of a failed print server. + PrintServerId string `json:"printServerId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorCode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorCode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PrintServerFailureInfo) MarshalJSON() ([]byte, error) { + type NoMethod PrintServerFailureInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Printer: Printer configuration. +type Printer struct { + // AuxiliaryMessages: Output only. Auxiliary messages about issues with + // the printer configuration if any. + AuxiliaryMessages []*AuxiliaryMessage `json:"auxiliaryMessages,omitempty"` + + // CreateTime: Output only. Time when printer was created. + CreateTime string `json:"createTime,omitempty"` + + // Description: Editable. Description of printer. + Description string `json:"description,omitempty"` + + // DisplayName: Editable. Name of printer. + DisplayName string `json:"displayName,omitempty"` + + // Id: Id of the printer. (During printer creation leave empty) + Id string `json:"id,omitempty"` + + // MakeAndModel: Editable. Make and model of printer, e.g. Lexmark + // MS610de. Value must be in the format seen in the ListPrinterModels + // response. + MakeAndModel string `json:"makeAndModel,omitempty"` + + // Name: The resource name of the Printer object, in the format + // customers/{customer-id}/printers/{printer-id} (During printer + // creation leave empty) + Name string `json:"name,omitempty"` + + // OrgUnitId: Organization Unit that owns this printer (Only can be set + // during Printer creation) + OrgUnitId string `json:"orgUnitId,omitempty"` + + // Uri: Editable. Printer URI. + Uri string `json:"uri,omitempty"` + + // UseDriverlessConfig: Editable. Flag to use driverless configuration + // or not.
If set to true, make_and_model can be ignored. + UseDriverlessConfig bool `json:"useDriverlessConfig,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuxiliaryMessages") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuxiliaryMessages") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Printer) MarshalJSON() ([]byte, error) { + type NoMethod Printer + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PrinterModel: Printer manufacturer and model +type PrinterModel struct { + // DisplayName: Display name. e.g. "Brother MFC-8840D" + DisplayName string `json:"displayName,omitempty"` + + // MakeAndModel: Make and model as represented in "make_and_model" field + // in Printer object. e.g. "brother mfc-8840d" + MakeAndModel string `json:"makeAndModel,omitempty"` + + // Manufacturer: Manufacturer. e.g. "Brother" + Manufacturer string `json:"manufacturer,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PrinterModel) MarshalJSON() ([]byte, error) { + type NoMethod PrinterModel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Privilege struct { + // ChildPrivileges: A list of child privileges. Privileges for a service + // form a tree. Each privilege can have a list of child privileges; this + // list is empty for a leaf privilege. + ChildPrivileges []*Privilege `json:"childPrivileges,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // IsOuScopable: If the privilege can be restricted to an organization + // unit. + IsOuScopable bool `json:"isOuScopable,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#privilege`.
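+ // Editor's note: because privileges form a tree via ChildPrivileges, a
+ // depth-first walk is the natural way to flatten one; this helper is an
+ // editor's sketch, not part of the generated API.
+ //
+ //	func walkPrivileges(ps []*Privilege, visit func(*Privilege)) {
+ //		for _, p := range ps {
+ //			visit(p)
+ //			walkPrivileges(p.ChildPrivileges, visit)
+ //		}
+ //	}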
+ Kind string `json:"kind,omitempty"` + + // PrivilegeName: The name of the privilege. + PrivilegeName string `json:"privilegeName,omitempty"` + + // ServiceId: The obfuscated ID of the service this privilege is for. + // This value is returned with `Privileges.list()` + // (/admin-sdk/directory/v1/reference/privileges/list). + ServiceId string `json:"serviceId,omitempty"` + + // ServiceName: The name of the service this privilege is for. + ServiceName string `json:"serviceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ChildPrivileges") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChildPrivileges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Privilege) MarshalJSON() ([]byte, error) { + type NoMethod Privilege + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Privileges struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Items: A list of Privilege resources. + Items []*Privilege `json:"items,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#privileges`. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Privileges) MarshalJSON() ([]byte, error) { + type NoMethod Privileges + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Role struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // IsSuperAdminRole: Returns `true` if the role is a super admin role. + IsSuperAdminRole bool `json:"isSuperAdminRole,omitempty"` + + // IsSystemRole: Returns `true` if this is a pre-defined system role. 
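+ // Editor's note: a hedged sketch of building a custom Role from entries
+ // returned by Privileges.list; the role name, privilege name, and
+ // serviceId below are placeholders, and RoleRolePrivileges is defined
+ // just after this type.
+ //
+ //	role := &Role{
+ //		RoleName: "Groups Reader",
+ //		RolePrivileges: []*RoleRolePrivileges{
+ //			{PrivilegeName: "GROUPS_RETRIEVE", ServiceId: serviceId},
+ //		},
+ //	}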
+ IsSystemRole bool `json:"isSystemRole,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#role`. + Kind string `json:"kind,omitempty"` + + // RoleDescription: A short description of the role. + RoleDescription string `json:"roleDescription,omitempty"` + + // RoleId: ID of the role. + RoleId int64 `json:"roleId,omitempty,string"` + + // RoleName: Name of the role. + RoleName string `json:"roleName,omitempty"` + + // RolePrivileges: The set of privileges that are granted to this role. + RolePrivileges []*RoleRolePrivileges `json:"rolePrivileges,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Role) MarshalJSON() ([]byte, error) { + type NoMethod Role + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RoleRolePrivileges struct { + // PrivilegeName: The name of the privilege. + PrivilegeName string `json:"privilegeName,omitempty"` + + // ServiceId: The obfuscated ID of the service this privilege is for. + // This value is returned with `Privileges.list()` + // (/admin-sdk/directory/v1/reference/privileges/list). + ServiceId string `json:"serviceId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PrivilegeName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PrivilegeName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RoleRolePrivileges) MarshalJSON() ([]byte, error) { + type NoMethod RoleRolePrivileges + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RoleAssignment: Defines an assignment of a role. 
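+// Editor's note: a minimal, hedged sketch of assigning a role, assuming
+// the generated RoleAssignments service defined later in this package;
+// srv, customer, roleId, and userId are placeholders.
+//
+//	ra := &RoleAssignment{
+//		RoleId:     roleId,
+//		AssignedTo: userId,
+//		ScopeType:  "CUSTOMER",
+//	}
+//	_, err := srv.RoleAssignments.Insert(customer, ra).Do()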
+type RoleAssignment struct { + // AssignedTo: The unique ID of the entity this role is assigned + // to—either the `user_id` of a user, the `group_id` of a group, or + // the `uniqueId` of a service account as defined in Identity and Access + // Management (IAM) + // (https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts). + AssignedTo string `json:"assignedTo,omitempty"` + + // AssigneeType: Output only. The type of the assignee (`USER` or + // `GROUP`). + // + // Possible values: + // "user" - An individual user within the domain. + // "group" - A group within the domain. + AssigneeType string `json:"assigneeType,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#roleAssignment`. + Kind string `json:"kind,omitempty"` + + // OrgUnitId: If the role is restricted to an organization unit, this + // contains the ID for the organization unit the exercise of this role + // is restricted to. + OrgUnitId string `json:"orgUnitId,omitempty"` + + // RoleAssignmentId: ID of this roleAssignment. + RoleAssignmentId int64 `json:"roleAssignmentId,omitempty,string"` + + // RoleId: The ID of the role that is assigned. + RoleId int64 `json:"roleId,omitempty,string"` + + // ScopeType: The scope in which this role is assigned. + ScopeType string `json:"scopeType,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AssignedTo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AssignedTo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RoleAssignment) MarshalJSON() ([]byte, error) { + type NoMethod RoleAssignment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RoleAssignments struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Items: A list of RoleAssignment resources. + Items []*RoleAssignment `json:"items,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#roleAssignments`. + Kind string `json:"kind,omitempty"` + + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RoleAssignments) MarshalJSON() ([]byte, error) { + type NoMethod RoleAssignments + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Roles struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Items: A list of Role resources. + Items []*Role `json:"items,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#roles`. + Kind string `json:"kind,omitempty"` + + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Roles) MarshalJSON() ([]byte, error) { + type NoMethod Roles + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Schema: The type of API resource. For Schema resources, this is +// always `admin#directory#schema`. +type Schema struct { + // DisplayName: Display name for the schema. + DisplayName string `json:"displayName,omitempty"` + + // Etag: The ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Fields: A list of fields in the schema. + Fields []*SchemaFieldSpec `json:"fields,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // SchemaId: The unique identifier of the schema (Read-only) + SchemaId string `json:"schemaId,omitempty"` + + // SchemaName: The schema's name. Each `schema_name` must be unique + // within a customer. Reusing a name results in a `409: Entity already + // exists` error. + SchemaName string `json:"schemaName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Schema) MarshalJSON() ([]byte, error) { + type NoMethod Schema + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SchemaFieldSpec: You can use schemas to add custom fields to user +// profiles. You can use these fields to store information such as the +// projects your users work on, their physical locations, their hire +// dates, or whatever else fits your business needs. For more +// information, see Custom User Fields +// (/admin-sdk/directory/v1/guides/manage-schemas). +type SchemaFieldSpec struct { + // DisplayName: Display Name of the field. + DisplayName string `json:"displayName,omitempty"` + + // Etag: The ETag of the field. + Etag string `json:"etag,omitempty"` + + // FieldId: The unique identifier of the field (Read-only) + FieldId string `json:"fieldId,omitempty"` + + // FieldName: The name of the field. + FieldName string `json:"fieldName,omitempty"` + + // FieldType: The type of the field. + FieldType string `json:"fieldType,omitempty"` + + // Indexed: Boolean specifying whether the field is indexed or not. + // Default: `true`. + // + // Default: true + Indexed *bool `json:"indexed,omitempty"` + + // Kind: The kind of resource this is. For schema fields this is always + // `admin#directory#schema#fieldspec`. + Kind string `json:"kind,omitempty"` + + // MultiValued: A boolean specifying whether this is a multi-valued + // field or not. Default: `false`. + MultiValued bool `json:"multiValued,omitempty"` + + // NumericIndexingSpec: Indexing spec for a numeric field. By default, + // only exact match queries will be supported for numeric fields. + // Setting the `numericIndexingSpec` allows range queries to be + // supported. + NumericIndexingSpec *SchemaFieldSpecNumericIndexingSpec `json:"numericIndexingSpec,omitempty"` + + // ReadAccessType: Specifies who can view values of this field. See + // Retrieve users as a non-administrator + // (/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin) + // for more information. Note: It may take up to 24 hours for changes + // to this field to be reflected. + ReadAccessType string `json:"readAccessType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SchemaFieldSpec) MarshalJSON() ([]byte, error) { + type NoMethod SchemaFieldSpec + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SchemaFieldSpecNumericIndexingSpec: Indexing spec for a numeric +// field. By default, only exact match queries will be supported for +// numeric fields. Setting the `numericIndexingSpec` allows range +// queries to be supported. +type SchemaFieldSpecNumericIndexingSpec struct { + // MaxValue: Maximum value of this field. This is meant to be indicative + // rather than enforced. Values outside this range will still be + // indexed, but search may not be as performant. + MaxValue float64 `json:"maxValue,omitempty"` + + // MinValue: Minimum value of this field. This is meant to be indicative + // rather than enforced. Values outside this range will still be + // indexed, but search may not be as performant. + MinValue float64 `json:"minValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxValue") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SchemaFieldSpecNumericIndexingSpec) MarshalJSON() ([]byte, error) { + type NoMethod SchemaFieldSpecNumericIndexingSpec + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *SchemaFieldSpecNumericIndexingSpec) UnmarshalJSON(data []byte) error { + type NoMethod SchemaFieldSpecNumericIndexingSpec + var s1 struct { + MaxValue gensupport.JSONFloat64 `json:"maxValue"` + MinValue gensupport.JSONFloat64 `json:"minValue"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.MaxValue = float64(s1.MaxValue) + s.MinValue = float64(s1.MinValue) + return nil +} + +// Schemas: JSON response template for List Schema operation in +// Directory API. +type Schemas struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // Schemas: A list of UserSchema objects. + Schemas []*Schema `json:"schemas,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Schemas) MarshalJSON() ([]byte, error) { + type NoMethod Schemas + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Token: JSON template for token resource in Directory API. +type Token struct { + // Anonymous: Whether the application is registered with Google. The + // value is `true` if the application has an anonymous Client ID. + Anonymous bool `json:"anonymous,omitempty"` + + // ClientId: The Client ID of the application the token is issued to. + ClientId string `json:"clientId,omitempty"` + + // DisplayText: The displayable name of the application the token is + // issued to. + DisplayText string `json:"displayText,omitempty"` + + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#token`. + Kind string `json:"kind,omitempty"` + + // NativeApp: Whether the token is issued to an installed application. + // The value is `true` if the application is installed to a desktop or + // mobile device. + NativeApp bool `json:"nativeApp,omitempty"` + + // Scopes: A list of authorization scopes the application is granted. + Scopes []string `json:"scopes,omitempty"` + + // UserKey: The unique ID of the user that issued the token. + UserKey string `json:"userKey,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Anonymous") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Anonymous") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Token) MarshalJSON() ([]byte, error) { + type NoMethod Token + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Tokens: JSON response template for List tokens operation in Directory +// API. +type Tokens struct { + // Etag: ETag of the resource. 
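+ // Editor's note: a hedged sketch tying together the Schema types defined
+ // above: a custom schema with one numeric field whose indexing range
+ // enables range queries. The names and bounds are illustrative only.
+ //
+ //	schema := &Schema{
+ //		SchemaName: "employmentData",
+ //		Fields: []*SchemaFieldSpec{{
+ //			FieldName: "jobLevel",
+ //			FieldType: "INT64",
+ //			NumericIndexingSpec: &SchemaFieldSpecNumericIndexingSpec{
+ //				MinValue: 1,
+ //				MaxValue: 10,
+ //			},
+ //		}},
+ //	}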
+ Etag string `json:"etag,omitempty"` + + // Items: A list of Token resources. + Items []*Token `json:"items,omitempty"` + + // Kind: The type of the API resource. This is always + // `admin#directory#tokenList`. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Tokens) MarshalJSON() ([]byte, error) { + type NoMethod Tokens + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// User: The Directory API allows you to create and manage your +// account's users, user aliases, and user Google profile photos. For +// more information about common tasks, see the User Accounts +// Developer's Guide (/admin-sdk/directory/v1/guides/manage-users.html) +// and the User Aliases Developer's Guide +// (/admin-sdk/directory/v1/guides/manage-user-aliases.html). +type User struct { + // Addresses: The list of the user's addresses. The maximum allowed data + // size for this field is 10KB. + Addresses interface{} `json:"addresses,omitempty"` + + // AgreedToTerms: Output only. This property is `true` if the user has + // completed an initial login and accepted the Terms of Service + // agreement. + AgreedToTerms bool `json:"agreedToTerms,omitempty"` + + // Aliases: Output only. The list of the user's alias email addresses. + Aliases []string `json:"aliases,omitempty"` + + // Archived: Indicates if user is archived. + Archived bool `json:"archived,omitempty"` + + // ChangePasswordAtNextLogin: Indicates if the user is forced to change + // their password at next login. This setting doesn't apply when the + // user signs in via a third-party identity provider + // (https://support.google.com/a/answer/60224). + ChangePasswordAtNextLogin bool `json:"changePasswordAtNextLogin,omitempty"` + + // CreationTime: User's G Suite account creation time. (Read-only) + CreationTime string `json:"creationTime,omitempty"` + + // CustomSchemas: Custom fields of the user. The key is a `schema_name` + // and its values are `'field_name': 'field_value'`. + CustomSchemas map[string]googleapi.RawMessage `json:"customSchemas,omitempty"` + + // CustomerId: Output only. The customer ID to retrieve all account + // users + // (/admin-sdk/directory/v1/guides/manage-users.html#get_all_users). You + // can use the alias `my_customer` to represent your account's + // `customerId`. As a reseller administrator, you can use the resold + // customer account's `customerId`. 
To get a `customerId`, use the
+	// account's primary domain in the `domain` parameter of a users.list
+	// (/admin-sdk/directory/v1/reference/users/list) request.
+	CustomerId string `json:"customerId,omitempty"`
+
+	DeletionTime string `json:"deletionTime,omitempty"`
+
+	// Emails: The list of the user's email addresses. The maximum allowed
+	// data size for this field is 10KB.
+	Emails interface{} `json:"emails,omitempty"`
+
+	// Etag: Output only. ETag of the resource.
+	Etag string `json:"etag,omitempty"`
+
+	// ExternalIds: The list of external IDs for the user, such as an
+	// employee or network ID. The maximum allowed data size for this field
+	// is 2KB.
+	ExternalIds interface{} `json:"externalIds,omitempty"`
+
+	// Gender: The user's gender. The maximum allowed data size for this
+	// field is 1KB.
+	Gender interface{} `json:"gender,omitempty"`
+
+	// HashFunction: Stores the hash format of the `password` property. The
+	// following `hashFunction` values are allowed: * `MD5` - Accepts simple
+	// hex-encoded values. * `SHA-1` - Accepts simple hex-encoded values. *
+	// `crypt` - Compliant with the C crypt library
+	// (https://en.wikipedia.org/wiki/Crypt_%28C%29). Supports the DES, MD5
+	// (hash prefix `$1$`), SHA-256 (hash prefix `$5$`), and SHA-512 (hash
+	// prefix `$6$`) hash algorithms. If rounds are specified as part of the
+	// prefix, they must be 10,000 or fewer.
+	HashFunction string `json:"hashFunction,omitempty"`
+
+	// Id: The unique ID for the user. A user `id` can be used as a user
+	// request URI's `userKey`.
+	Id string `json:"id,omitempty"`
+
+	// Ims: The list of the user's Instant Messenger (IM) accounts. A user
+	// account can have multiple ims properties, but only one of them can
+	// be the primary IM contact. The maximum allowed data size for this
+	// field is 2KB.
+	Ims interface{} `json:"ims,omitempty"`
+
+	// IncludeInGlobalAddressList: Indicates if the user's profile is
+	// visible in the Google Workspace global address list when the contact
+	// sharing feature is enabled for the domain. For more information about
+	// excluding user profiles, see the administration help center
+	// (https://support.google.com/a/answer/1285988).
+	IncludeInGlobalAddressList bool `json:"includeInGlobalAddressList,omitempty"`
+
+	// IpWhitelisted: If `true`, the user's IP address is subject to a
+	// deprecated IP address `allowlist`
+	// (https://support.google.com/a/answer/60752) configuration.
+	IpWhitelisted bool `json:"ipWhitelisted,omitempty"`
+
+	// IsAdmin: Output only. Indicates a user with super administrator
+	// privileges. The `isAdmin` property can only be edited in the Make a
+	// user an administrator
+	// (/admin-sdk/directory/v1/guides/manage-users.html#make_admin)
+	// operation ( makeAdmin
+	// (/admin-sdk/directory/v1/reference/users/makeAdmin.html) method). If
+	// edited in the user insert
+	// (/admin-sdk/directory/v1/reference/users/insert.html) or update
+	// (/admin-sdk/directory/v1/reference/users/update.html) methods, the
+	// edit is ignored by the API service.
+	IsAdmin bool `json:"isAdmin,omitempty"`
+
+	// IsDelegatedAdmin: Output only. Indicates if the user is a delegated
+	// administrator. Delegated administrators are supported by the API but
+	// cannot create or undelete users, or make users administrators. These
+	// requests are ignored by the API service. Roles and privileges for
+	// administrators are assigned using the Admin console
+	// (https://support.google.com/a/answer/33325).
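+	//
+	// Granting super admin goes through the makeAdmin operation; a minimal
+	// sketch, assuming an authenticated *Service named srv (hypothetical):
+	//
+	//	err := srv.Users.MakeAdmin("user@example.com",
+	//		&UserMakeAdmin{Status: true}).Do()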
+ IsDelegatedAdmin bool `json:"isDelegatedAdmin,omitempty"` + + // IsEnforcedIn2Sv: Output only. Is 2-step verification enforced + // (Read-only) + IsEnforcedIn2Sv bool `json:"isEnforcedIn2Sv,omitempty"` + + // IsEnrolledIn2Sv: Output only. Is enrolled in 2-step verification + // (Read-only) + IsEnrolledIn2Sv bool `json:"isEnrolledIn2Sv,omitempty"` + + // IsMailboxSetup: Output only. Indicates if the user's Google mailbox + // is created. This property is only applicable if the user has been + // assigned a Gmail license. + IsMailboxSetup bool `json:"isMailboxSetup,omitempty"` + + // Keywords: The list of the user's keywords. The maximum allowed data + // size for this field is 1KB. + Keywords interface{} `json:"keywords,omitempty"` + + // Kind: Output only. The type of the API resource. For Users resources, + // the value is `admin#directory#user`. + Kind string `json:"kind,omitempty"` + + // Languages: The user's languages. The maximum allowed data size for + // this field is 1KB. + Languages interface{} `json:"languages,omitempty"` + + // LastLoginTime: User's last login time. (Read-only) + LastLoginTime string `json:"lastLoginTime,omitempty"` + + // Locations: The user's locations. The maximum allowed data size for + // this field is 10KB. + Locations interface{} `json:"locations,omitempty"` + + // Name: Holds the given and family names of the user, and the read-only + // `fullName` value. The maximum number of characters in the `givenName` + // and in the `familyName` values is 60. In addition, name values + // support unicode/UTF-8 characters, and can contain spaces, letters + // (a-z), numbers (0-9), dashes (-), forward slashes (/), and periods + // (.). For more information about character usage rules, see the + // administration help center + // (https://support.google.com/a/answer/9193374). Maximum allowed data + // size for this field is 1KB. + Name *UserName `json:"name,omitempty"` + + // NonEditableAliases: Output only. The list of the user's non-editable + // alias email addresses. These are typically outside the account's + // primary domain or sub-domain. + NonEditableAliases []string `json:"nonEditableAliases,omitempty"` + + // Notes: Notes for the user. + Notes interface{} `json:"notes,omitempty"` + + // OrgUnitPath: The full path of the parent organization associated with + // the user. If the parent organization is the top-level, it is + // represented as a forward slash (`/`). + OrgUnitPath string `json:"orgUnitPath,omitempty"` + + // Organizations: The list of organizations the user belongs to. The + // maximum allowed data size for this field is 10KB. + Organizations interface{} `json:"organizations,omitempty"` + + // Password: User's password + Password string `json:"password,omitempty"` + + // Phones: The list of the user's phone numbers. The maximum allowed + // data size for this field is 1KB. + Phones interface{} `json:"phones,omitempty"` + + // PosixAccounts: The list of POSIX + // (https://www.opengroup.org/austin/papers/posix_faq.html) account + // information for the user. + PosixAccounts interface{} `json:"posixAccounts,omitempty"` + + // PrimaryEmail: The user's primary email address. This property is + // required in a request to create a user account. The `primaryEmail` + // must be unique and cannot be an alias of another user. + PrimaryEmail string `json:"primaryEmail,omitempty"` + + // RecoveryEmail: Recovery email of the user. + RecoveryEmail string `json:"recoveryEmail,omitempty"` + + // RecoveryPhone: Recovery phone of the user. 
The phone number must be + // in the E.164 format, starting with the plus sign (+). Example: + // *+16506661212*. + RecoveryPhone string `json:"recoveryPhone,omitempty"` + + // Relations: The list of the user's relationships to other users. The + // maximum allowed data size for this field is 2KB. + Relations interface{} `json:"relations,omitempty"` + + // SshPublicKeys: A list of SSH public keys. + SshPublicKeys interface{} `json:"sshPublicKeys,omitempty"` + + // Suspended: Indicates if user is suspended. + Suspended bool `json:"suspended,omitempty"` + + // SuspensionReason: Output only. Has the reason a user account is + // suspended either by the administrator or by Google at the time of + // suspension. The property is returned only if the `suspended` property + // is `true`. + SuspensionReason string `json:"suspensionReason,omitempty"` + + // ThumbnailPhotoEtag: Output only. ETag of the user's photo (Read-only) + ThumbnailPhotoEtag string `json:"thumbnailPhotoEtag,omitempty"` + + // ThumbnailPhotoUrl: Output only. The URL of the user's profile photo. + // The URL might be temporary or private. + ThumbnailPhotoUrl string `json:"thumbnailPhotoUrl,omitempty"` + + // Websites: The user's websites. The maximum allowed data size for this + // field is 2KB. + Websites interface{} `json:"websites,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Addresses") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *User) MarshalJSON() ([]byte, error) { + type NoMethod User + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserAbout: JSON template for About (notes) of a user in Directory +// API. +type UserAbout struct { + // ContentType: About entry can have a type which indicates the content + // type. It can either be plain or html. By default, notes contents are + // assumed to contain plain text. + ContentType string `json:"contentType,omitempty"` + + // Value: Actual value of notes. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContentType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentType") to include + // in API requests with the JSON null value. 
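+	// (For example, a minimal sketch:
+	//
+	//	a := &UserAbout{ContentType: "plain", NullFields: []string{"Value"}}
+	//	b, _ := a.MarshalJSON()
+	//
+	// Here b contains "value":null alongside "contentType":"plain".)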
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserAbout) MarshalJSON() ([]byte, error) { + type NoMethod UserAbout + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserAddress: JSON template for address. +type UserAddress struct { + // Country: Country. + Country string `json:"country,omitempty"` + + // CountryCode: Country code. + CountryCode string `json:"countryCode,omitempty"` + + // CustomType: Custom type. + CustomType string `json:"customType,omitempty"` + + // ExtendedAddress: Extended Address. + ExtendedAddress string `json:"extendedAddress,omitempty"` + + // Formatted: Formatted address. + Formatted string `json:"formatted,omitempty"` + + // Locality: Locality. + Locality string `json:"locality,omitempty"` + + // PoBox: Other parts of address. + PoBox string `json:"poBox,omitempty"` + + // PostalCode: Postal code. + PostalCode string `json:"postalCode,omitempty"` + + // Primary: If this is user's primary address. Only one entry could be + // marked as primary. + Primary bool `json:"primary,omitempty"` + + // Region: Region. + Region string `json:"region,omitempty"` + + // SourceIsStructured: User supplied address was structured. Structured + // addresses are NOT supported at this time. You might be able to write + // structured addresses but any values will eventually be clobbered. + SourceIsStructured bool `json:"sourceIsStructured,omitempty"` + + // StreetAddress: Street. + StreetAddress string `json:"streetAddress,omitempty"` + + // Type: Each entry can have a type which indicates standard values of + // that entry. For example address could be of home work etc. In + // addition to the standard type an entry can have a custom type and can + // take any value. Such type should have the CUSTOM value as type and + // also have a customType value. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Country") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Country") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserAddress) MarshalJSON() ([]byte, error) { + type NoMethod UserAddress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserAlias: The Directory API manages aliases, which are alternative +// email addresses. +type UserAlias struct { + // Alias: The alias email address. + Alias string `json:"alias,omitempty"` + + // Etag: ETag of the resource. 
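+	//
+	// Alias management calls use the Alias type rather than UserAlias; a
+	// minimal sketch, assuming an authenticated *Service named srv
+	// (hypothetical):
+	//
+	//	a, err := srv.Users.Aliases.Insert("user@example.com",
+	//		&Alias{Alias: "alt@example.com"}).Do()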
+ Etag string `json:"etag,omitempty"` + + // Id: The unique ID for the user. + Id string `json:"id,omitempty"` + + // Kind: The type of the API resource. For Alias resources, the value is + // `admin#directory#alias`. + Kind string `json:"kind,omitempty"` + + // PrimaryEmail: The user's primary email address. + PrimaryEmail string `json:"primaryEmail,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alias") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alias") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserAlias) MarshalJSON() ([]byte, error) { + type NoMethod UserAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserEmail: JSON template for an email. +type UserEmail struct { + // Address: Email id of the user. + Address string `json:"address,omitempty"` + + // CustomType: Custom Type. + CustomType string `json:"customType,omitempty"` + + // Primary: If this is user's primary email. Only one entry could be + // marked as primary. + Primary bool `json:"primary,omitempty"` + + // Type: Each entry can have a type which indicates standard types of + // that entry. For example email could be of home, work etc. In addition + // to the standard type, an entry can have a custom type and can take + // any value Such types should have the CUSTOM value as type and also + // have a customType value. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserEmail) MarshalJSON() ([]byte, error) { + type NoMethod UserEmail + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserExternalId: JSON template for an externalId entry. +type UserExternalId struct { + // CustomType: Custom type. + CustomType string `json:"customType,omitempty"` + + // Type: The type of the Id. 
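+	//
+	// For example, a hypothetical entry attached to a User (the
+	// ExternalIds field on User is an interface{} and accepts a slice):
+	//
+	//	u := &User{}
+	//	u.ExternalIds = []UserExternalId{
+	//		{Type: "organization", Value: "employee-1234"}, // assumed type value
+	//	}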
+	Type string `json:"type,omitempty"`
+
+	// Value: The value of the ID.
+	Value string `json:"value,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "CustomType") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "CustomType") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *UserExternalId) MarshalJSON() ([]byte, error) {
+	type NoMethod UserExternalId
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type UserGender struct {
+	// AddressMeAs: A human-readable string containing the proper way to
+	// refer to the profile owner, for example he/him/his or
+	// they/them/their.
+	AddressMeAs string `json:"addressMeAs,omitempty"`
+
+	// CustomGender: Custom gender.
+	CustomGender string `json:"customGender,omitempty"`
+
+	// Type: Gender.
+	Type string `json:"type,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "AddressMeAs") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "AddressMeAs") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *UserGender) MarshalJSON() ([]byte, error) {
+	type NoMethod UserGender
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// UserIm: JSON template for instant messenger of a user.
+type UserIm struct {
+	// CustomProtocol: Custom protocol.
+	CustomProtocol string `json:"customProtocol,omitempty"`
+
+	// CustomType: Custom type.
+	CustomType string `json:"customType,omitempty"`
+
+	// Im: Instant messenger ID.
+	Im string `json:"im,omitempty"`
+
+	// Primary: Whether this is the user's primary IM. Only one entry can
+	// be marked as primary.
+	Primary bool `json:"primary,omitempty"`
+
+	// Protocol: Protocol used in the instant messenger. It should be one
+	// of the values from the ImProtocolTypes map. Like type, it can take a
+	// CUSTOM value, with the custom name given in the customProtocol
+	// field.
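+	//
+	// For example, a hypothetical custom-protocol entry (the Ims field on
+	// User is an interface{} and accepts a slice):
+	//
+	//	u.Ims = []UserIm{{
+	//		Im:             "@user:example.org",
+	//		Protocol:       "custom_protocol",
+	//		CustomProtocol: "matrix", // assumed custom protocol name
+	//		Type:           "work",
+	//	}}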
+ Protocol string `json:"protocol,omitempty"` + + // Type: Each entry can have a type which indicates standard types of + // that entry. For example instant messengers could be of home work etc. + // In addition to the standard type an entry can have a custom type and + // can take any value. Such types should have the CUSTOM value as type + // and also have a customType value. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomProtocol") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomProtocol") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UserIm) MarshalJSON() ([]byte, error) { + type NoMethod UserIm + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserKeyword: JSON template for a keyword entry. +type UserKeyword struct { + // CustomType: Custom Type. + CustomType string `json:"customType,omitempty"` + + // Type: Each entry can have a type which indicates standard type of + // that entry. For example keyword could be of type occupation or + // outlook. In addition to the standard type an entry can have a custom + // type and can give it any name. Such types should have the CUSTOM + // value as type and also have a customType value. + Type string `json:"type,omitempty"` + + // Value: Keyword. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserKeyword) MarshalJSON() ([]byte, error) { + type NoMethod UserKeyword + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserLanguage: JSON template for a language entry. +type UserLanguage struct { + // CustomLanguage: Other language. User can provide their own language + // name if there is no corresponding ISO 639 language code. If this is + // set, `languageCode` can't be set. 
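+	//
+	// For example, a hypothetical pair of entries (the Languages field on
+	// User is an interface{} and accepts a slice):
+	//
+	//	u.Languages = []UserLanguage{
+	//		{LanguageCode: "en", Preference: "preferred"},
+	//		{CustomLanguage: "Elvish"}, // no ISO 639 code, so customLanguage
+	//	}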
+ CustomLanguage string `json:"customLanguage,omitempty"` + + // LanguageCode: ISO 639 string representation of a language. See + // Language Codes (/admin-sdk/directory/v1/languages) for the list of + // supported codes. Valid language codes outside the supported set will + // be accepted by the API but may lead to unexpected behavior. Illegal + // values cause `SchemaException`. If this is set, `customLanguage` + // can't be set. + LanguageCode string `json:"languageCode,omitempty"` + + // Preference: Optional. If present, controls whether the specified + // `languageCode` is the user's preferred language. If `customLanguage` + // is set, this can't be set. Allowed values are `preferred` and + // `not_preferred`. + Preference string `json:"preference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomLanguage") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomLanguage") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UserLanguage) MarshalJSON() ([]byte, error) { + type NoMethod UserLanguage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserLocation: JSON template for a location entry. +type UserLocation struct { + // Area: Textual location. This is most useful for display purposes to + // concisely describe the location. For example 'Mountain View, CA', + // 'Near Seattle', 'US-NYC-9TH 9A209A.'' + Area string `json:"area,omitempty"` + + // BuildingId: Building Identifier. + BuildingId string `json:"buildingId,omitempty"` + + // CustomType: Custom Type. + CustomType string `json:"customType,omitempty"` + + // DeskCode: Most specific textual code of individual desk location. + DeskCode string `json:"deskCode,omitempty"` + + // FloorName: Floor name/number. + FloorName string `json:"floorName,omitempty"` + + // FloorSection: Floor section. More specific location within the floor. + // For example if a floor is divided into sections 'A', 'B' and 'C' this + // field would identify one of those values. + FloorSection string `json:"floorSection,omitempty"` + + // Type: Each entry can have a type which indicates standard types of + // that entry. For example location could be of types default and desk. + // In addition to standard type an entry can have a custom type and can + // give it any name. Such types should have 'custom' as type and also + // have a customType value. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Area") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Area") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserLocation) MarshalJSON() ([]byte, error) { + type NoMethod UserLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UserMakeAdmin struct { + // Status: Indicates the administrator status of the user. + Status bool `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Status") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Status") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserMakeAdmin) MarshalJSON() ([]byte, error) { + type NoMethod UserMakeAdmin + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UserName struct { + // DisplayName: The user's display name. Limit: 256 characters. + DisplayName string `json:"displayName,omitempty"` + + // FamilyName: The user's last name. Required when creating a user + // account. + FamilyName string `json:"familyName,omitempty"` + + // FullName: The user's full name formed by concatenating the first and + // last name values. + FullName string `json:"fullName,omitempty"` + + // GivenName: The user's first name. Required when creating a user + // account. + GivenName string `json:"givenName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *UserName) MarshalJSON() ([]byte, error) {
+	type NoMethod UserName
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// UserOrganization: JSON template for an organization entry.
+type UserOrganization struct {
+	// CostCenter: The cost center of the user's department.
+	CostCenter string `json:"costCenter,omitempty"`
+
+	// CustomType: Custom type.
+	CustomType string `json:"customType,omitempty"`
+
+	// Department: Department within the organization.
+	Department string `json:"department,omitempty"`
+
+	// Description: Description of the organization.
+	Description string `json:"description,omitempty"`
+
+	// Domain: The domain to which the organization belongs.
+	Domain string `json:"domain,omitempty"`
+
+	// FullTimeEquivalent: The full-time equivalent millipercent within the
+	// organization (100000 = 100%).
+	FullTimeEquivalent int64 `json:"fullTimeEquivalent,omitempty"`
+
+	// Location: Location of the organization. This need not be a fully
+	// qualified address.
+	Location string `json:"location,omitempty"`
+
+	// Name: Name of the organization.
+	Name string `json:"name,omitempty"`
+
+	// Primary: Whether this is the user's primary organization.
+	Primary bool `json:"primary,omitempty"`
+
+	// Symbol: Symbol of the organization.
+	Symbol string `json:"symbol,omitempty"`
+
+	// Title: Title (designation) of the user in the organization.
+	Title string `json:"title,omitempty"`
+
+	// Type: Each entry can have a type which indicates standard types of
+	// that entry. For example, an organization could be of type school,
+	// work, etc. In addition to the standard type, an entry can have a
+	// custom type and can give it any name. Such types should have the
+	// CUSTOM value as type and also have a CustomType value.
+	Type string `json:"type,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "CostCenter") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "CostCenter") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *UserOrganization) MarshalJSON() ([]byte, error) {
+	type NoMethod UserOrganization
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// UserPhone: JSON template for a phone entry.
+type UserPhone struct {
+	// CustomType: Custom Type.
+	CustomType string `json:"customType,omitempty"`
+
+	// Primary: Whether this is the user's primary phone.
+	Primary bool `json:"primary,omitempty"`
+
+	// Type: Each entry can have a type which indicates standard types of
+	// that entry. For example, a phone could be of type home_fax, work,
+	// mobile, etc. In addition to the standard type, an entry can have a
+	// custom type and can give it any name.
Such types should have the CUSTOM value as type + // and also have a customType value. + Type string `json:"type,omitempty"` + + // Value: Phone number. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserPhone) MarshalJSON() ([]byte, error) { + type NoMethod UserPhone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UserPhoto struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Height: Height of the photo in pixels. + Height int64 `json:"height,omitempty"` + + // Id: The ID the API uses to uniquely identify the user. + Id string `json:"id,omitempty"` + + // Kind: The type of the API resource. For Photo resources, this is + // `admin#directory#user#photo`. + Kind string `json:"kind,omitempty"` + + // MimeType: The MIME type of the photo. Allowed values are `JPEG`, + // `PNG`, `GIF`, `BMP`, `TIFF`, and web-safe base64 encoding. + MimeType string `json:"mimeType,omitempty"` + + // PhotoData: The user photo's upload data in web-safe Base64 + // (https://en.wikipedia.org/wiki/Base64#URL_applications) format in + // bytes. This means: * The slash (/) character is replaced with the + // underscore (_) character. * The plus sign (+) character is replaced + // with the hyphen (-) character. * The equals sign (=) character is + // replaced with the asterisk (*). * For padding, the period (.) + // character is used instead of the RFC-4648 baseURL definition which + // uses the equals sign (=) for padding. This is done to simplify + // URL-parsing. * Whatever the size of the photo being uploaded, the API + // downsizes it to 96x96 pixels. + PhotoData string `json:"photoData,omitempty"` + + // PrimaryEmail: The user's primary email address. + PrimaryEmail string `json:"primaryEmail,omitempty"` + + // Width: Width of the photo in pixels. + Width int64 `json:"width,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserPhoto) MarshalJSON() ([]byte, error) { + type NoMethod UserPhoto + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserPosixAccount: JSON template for a POSIX account entry. +type UserPosixAccount struct { + // AccountId: A POSIX account field identifier. + AccountId string `json:"accountId,omitempty"` + + // Gecos: The GECOS (user information) for this account. + Gecos string `json:"gecos,omitempty"` + + // Gid: The default group ID. + Gid uint64 `json:"gid,omitempty,string"` + + // HomeDirectory: The path to the home directory for this account. + HomeDirectory string `json:"homeDirectory,omitempty"` + + // OperatingSystemType: The operating system type for this account. + OperatingSystemType string `json:"operatingSystemType,omitempty"` + + // Primary: If this is user's primary account within the SystemId. + Primary bool `json:"primary,omitempty"` + + // Shell: The path to the login shell for this account. + Shell string `json:"shell,omitempty"` + + // SystemId: System identifier for which account Username or Uid apply + // to. + SystemId string `json:"systemId,omitempty"` + + // Uid: The POSIX compliant user ID. + Uid uint64 `json:"uid,omitempty,string"` + + // Username: The username of the account. + Username string `json:"username,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccountId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserPosixAccount) MarshalJSON() ([]byte, error) { + type NoMethod UserPosixAccount + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserRelation: JSON template for a relation entry. +type UserRelation struct { + // CustomType: Custom Type. + CustomType string `json:"customType,omitempty"` + + // Type: The relation of the user. Some of the possible values are + // mother father sister brother manager assistant partner. + Type string `json:"type,omitempty"` + + // Value: The name of the relation. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserRelation) MarshalJSON() ([]byte, error) { + type NoMethod UserRelation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserSshPublicKey: JSON template for a POSIX account entry. +type UserSshPublicKey struct { + // ExpirationTimeUsec: An expiration time in microseconds since epoch. + ExpirationTimeUsec int64 `json:"expirationTimeUsec,omitempty,string"` + + // Fingerprint: A SHA-256 fingerprint of the SSH public key. (Read-only) + Fingerprint string `json:"fingerprint,omitempty"` + + // Key: An SSH public key. + Key string `json:"key,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpirationTimeUsec") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpirationTimeUsec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UserSshPublicKey) MarshalJSON() ([]byte, error) { + type NoMethod UserSshPublicKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UserUndelete struct { + // OrgUnitPath: OrgUnit of User + OrgUnitPath string `json:"orgUnitPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OrgUnitPath") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OrgUnitPath") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
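+	//
+	// A minimal undelete sketch, assuming an authenticated *Service named
+	// srv (hypothetical); the userKey here is typically the unique user ID
+	// rather than an email address, since the address is released on
+	// deletion:
+	//
+	//	err := srv.Users.Undelete("the-unique-user-id",
+	//		&UserUndelete{OrgUnitPath: "/"}).Do()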
+ NullFields []string `json:"-"` +} + +func (s *UserUndelete) MarshalJSON() ([]byte, error) { + type NoMethod UserUndelete + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UserWebsite: JSON template for a website entry. +type UserWebsite struct { + // CustomType: Custom Type. + CustomType string `json:"customType,omitempty"` + + // Primary: If this is user's primary website or not. + Primary bool `json:"primary,omitempty"` + + // Type: Each entry can have a type which indicates standard types of + // that entry. For example website could be of home work blog etc. In + // addition to the standard type an entry can have a custom type and can + // give it any name. Such types should have the CUSTOM value as type and + // also have a customType value. + Type string `json:"type,omitempty"` + + // Value: Website. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UserWebsite) MarshalJSON() ([]byte, error) { + type NoMethod UserWebsite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type Users struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: Kind of resource this is. + Kind string `json:"kind,omitempty"` + + // NextPageToken: Token used to access next page of this result. + NextPageToken string `json:"nextPageToken,omitempty"` + + // TriggerEvent: Event that triggered this response (only used in case + // of Push Response) + TriggerEvent string `json:"trigger_event,omitempty"` + + // Users: A list of user objects. + Users []*User `json:"users,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Users) MarshalJSON() ([]byte, error) { + type NoMethod Users + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VerificationCode: The Directory API allows you to view, generate, and +// invalidate backup verification codes for a user. +type VerificationCode struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Kind: The type of the resource. This is always + // `admin#directory#verificationCode`. + Kind string `json:"kind,omitempty"` + + // UserId: The obfuscated unique ID of the user. + UserId string `json:"userId,omitempty"` + + // VerificationCode: A current verification code for the user. + // Invalidated or used verification codes are not returned as part of + // the result. + VerificationCode string `json:"verificationCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VerificationCode) MarshalJSON() ([]byte, error) { + type NoMethod VerificationCode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VerificationCodes: JSON response template for list verification codes +// operation in Directory API. +type VerificationCodes struct { + // Etag: ETag of the resource. + Etag string `json:"etag,omitempty"` + + // Items: A list of verification code resources. + Items []*VerificationCode `json:"items,omitempty"` + + // Kind: The type of the resource. This is always + // `admin#directory#verificationCodesList`. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VerificationCodes) MarshalJSON() ([]byte, error) { + type NoMethod VerificationCodes + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "directory.asps.delete": + +type AspsDeleteCall struct { + s *Service + userKey string + codeId int64 + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an ASP issued by a user. +// +// - codeId: The unique ID of the ASP to be deleted. +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *AspsService) Delete(userKey string, codeId int64) *AspsDeleteCall { + c := &AspsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.codeId = codeId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AspsDeleteCall) Fields(s ...googleapi.Field) *AspsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AspsDeleteCall) Context(ctx context.Context) *AspsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AspsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AspsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/asps/{codeId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + "codeId": strconv.FormatInt(c.codeId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.asps.delete" call. +func (c *AspsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes an ASP issued by a user.", + // "flatPath": "admin/directory/v1/users/{userKey}/asps/{codeId}", + // "httpMethod": "DELETE", + // "id": "directory.asps.delete", + // "parameterOrder": [ + // "userKey", + // "codeId" + // ], + // "parameters": { + // "codeId": { + // "description": "The unique ID of the ASP to be deleted.", + // "format": "int32", + // "location": "path", + // "required": true, + // "type": "integer" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/users/{userKey}/asps/{codeId}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.user.security"
+	//   ]
+	// }
+
+}
+
+// method id "directory.asps.get":
+
+type AspsGetCall struct {
+	s            *Service
+	userKey      string
+	codeId       int64
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Gets information about an ASP issued by a user.
+//
+// - codeId: The unique ID of the ASP.
+// - userKey: Identifies the user in the API request. The value can be
+//   the user's primary email address, alias email address, or unique
+//   user ID.
+func (r *AspsService) Get(userKey string, codeId int64) *AspsGetCall {
+	c := &AspsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.userKey = userKey
+	c.codeId = codeId
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AspsGetCall) Fields(s ...googleapi.Field) *AspsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AspsGetCall) IfNoneMatch(entityTag string) *AspsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AspsGetCall) Context(ctx context.Context) *AspsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *AspsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *AspsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/asps/{codeId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"userKey": c.userKey,
+		"codeId":  strconv.FormatInt(c.codeId, 10),
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.asps.get" call.
+// Exactly one of *Asp or error will be non-nil. Any non-2xx status code
+// is an error. Response headers are in either
+// *Asp.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *AspsGetCall) Do(opts ...googleapi.CallOption) (*Asp, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Asp{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets information about an ASP issued by a user.",
+	//   "flatPath": "admin/directory/v1/users/{userKey}/asps/{codeId}",
+	//   "httpMethod": "GET",
+	//   "id": "directory.asps.get",
+	//   "parameterOrder": [
+	//     "userKey",
+	//     "codeId"
+	//   ],
+	//   "parameters": {
+	//     "codeId": {
+	//       "description": "The unique ID of the ASP.",
+	//       "format": "int32",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "integer"
+	//     },
+	//     "userKey": {
+	//       "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/users/{userKey}/asps/{codeId}",
+	//   "response": {
+	//     "$ref": "Asp"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.user.security"
+	//   ]
+	// }
+
+}
+
+// method id "directory.asps.list":
+
+type AspsListCall struct {
+	s            *Service
+	userKey      string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Lists the ASPs issued by a user.
+//
+// - userKey: Identifies the user in the API request. The value can be
+//   the user's primary email address, alias email address, or unique
+//   user ID.
+func (r *AspsService) List(userKey string) *AspsListCall {
+	c := &AspsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.userKey = userKey
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AspsListCall) Fields(s ...googleapi.Field) *AspsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AspsListCall) IfNoneMatch(entityTag string) *AspsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AspsListCall) Context(ctx context.Context) *AspsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
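+//
+// A brief usage sketch (not part of the generated surface; "svc" is
+// assumed to be a *Service constructed elsewhere, and the header name
+// and email address are placeholders):
+//
+//	call := svc.Asps.List("user@example.com")
+//	call.Header().Set("X-Example-Header", "value") // sent with the HTTP request
+//	asps, err := call.Do()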
+func (c *AspsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AspsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/asps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.asps.list" call. +// Exactly one of *Asps or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Asps.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *AspsListCall) Do(opts ...googleapi.CallOption) (*Asps, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Asps{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the ASPs issued by a user.", + // "flatPath": "admin/directory/v1/users/{userKey}/asps", + // "httpMethod": "GET", + // "id": "directory.asps.list", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/asps", + // "response": { + // "$ref": "Asps" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "admin.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stops watching resources through this channel. +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
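+//
+// Stop takes the channel to cancel in the request body; a caller-side
+// sketch (identifiers are placeholders, "svc" an assumed *Service):
+//
+//	err := svc.Channels.Stop(&Channel{
+//		Id:         "my-channel-id",
+//		ResourceId: "resource-id-from-watch",
+//	}).Do()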
+func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory_v1/channels/stop") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Stops watching resources through this channel.", + // "flatPath": "admin/directory_v1/channels/stop", + // "httpMethod": "POST", + // "id": "admin.channels.stop", + // "parameterOrder": [], + // "parameters": {}, + // "path": "admin/directory_v1/channels/stop", + // "request": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.alias", + // "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + // "https://www.googleapis.com/auth/admin.directory.user.readonly", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "directory.chromeosdevices.action": + +type ChromeosdevicesActionCall struct { + s *Service + customerId string + resourceId string + chromeosdeviceaction *ChromeOsDeviceAction + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Action: Takes an action that affects a Chrome OS Device. This +// includes deprovisioning, disabling, and re-enabling devices. +// *Warning:* * Deprovisioning a device will stop device policy syncing +// and remove device-level printers. After a device is deprovisioned, it +// must be wiped before it can be re-enrolled. * Lost or stolen devices +// should use the disable action. * Re-enabling a disabled device will +// consume a device license. If you do not have sufficient licenses +// available when completing the re-enable action, you will receive an +// error. 
For more information about deprovisioning and disabling +// devices, visit the help center +// (https://support.google.com/chrome/a/answer/3523633). +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - resourceId: The unique ID of the device. The `resourceId`s are +// returned in the response from the chromeosdevices.list +// (/admin-sdk/directory/v1/reference/chromeosdevices/list) method. +func (r *ChromeosdevicesService) Action(customerId string, resourceId string, chromeosdeviceaction *ChromeOsDeviceAction) *ChromeosdevicesActionCall { + c := &ChromeosdevicesActionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.resourceId = resourceId + c.chromeosdeviceaction = chromeosdeviceaction + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChromeosdevicesActionCall) Fields(s ...googleapi.Field) *ChromeosdevicesActionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChromeosdevicesActionCall) Context(ctx context.Context) *ChromeosdevicesActionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChromeosdevicesActionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChromeosdevicesActionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdeviceaction) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{resourceId}/action") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "resourceId": c.resourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.chromeosdevices.action" call. +func (c *ChromeosdevicesActionCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Takes an action that affects a Chrome OS Device. This includes deprovisioning, disabling, and re-enabling devices. 
*Warning:* * Deprovisioning a device will stop device policy syncing and remove device-level printers. After a device is deprovisioned, it must be wiped before it can be re-enrolled. * Lost or stolen devices should use the disable action. * Re-enabling a disabled device will consume a device license. If you do not have sufficient licenses available when completing the re-enable action, you will receive an error. For more information about deprovisioning and disabling devices, visit the [help center](https://support.google.com/chrome/a/answer/3523633).", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{resourceId}/action", + // "httpMethod": "POST", + // "id": "directory.chromeosdevices.action", + // "parameterOrder": [ + // "customerId", + // "resourceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "resourceId": { + // "description": "The unique ID of the device. The `resourceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/directory/v1/reference/chromeosdevices/list) method.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{resourceId}/action", + // "request": { + // "$ref": "ChromeOsDeviceAction" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos" + // ] + // } + +} + +// method id "directory.chromeosdevices.get": + +type ChromeosdevicesGetCall struct { + s *Service + customerId string + deviceId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a Chrome OS device's properties. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - deviceId: The unique ID of the device. The `deviceId`s are returned +// in the response from the chromeosdevices.list +// (/admin-sdk/directory/v1/reference/chromeosdevices/list) method. +func (r *ChromeosdevicesService) Get(customerId string, deviceId string) *ChromeosdevicesGetCall { + c := &ChromeosdevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.deviceId = deviceId + return c +} + +// Projection sets the optional parameter "projection": Determines +// whether the response contains the full list of properties or only a +// subset. +// +// Possible values: +// +// "BASIC" - Includes only the basic metadata fields (e.g., deviceId, +// +// serialNumber, status, and user) +// +// "FULL" - Includes all metadata fields +func (c *ChromeosdevicesGetCall) Projection(projection string) *ChromeosdevicesGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
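+//
+// For example, a partial response carrying only a few of the fields
+// named in the JSON schema above (a sketch; "svc" and deviceID are
+// assumed to exist):
+//
+//	device, err := svc.Chromeosdevices.Get("my_customer", deviceID).
+//		Fields("deviceId", "serialNumber", "status").
+//		Do()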
+func (c *ChromeosdevicesGetCall) Fields(s ...googleapi.Field) *ChromeosdevicesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ChromeosdevicesGetCall) IfNoneMatch(entityTag string) *ChromeosdevicesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ChromeosdevicesGetCall) Context(ctx context.Context) *ChromeosdevicesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ChromeosdevicesGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ChromeosdevicesGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customerId": c.customerId,
+		"deviceId":   c.deviceId,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.chromeosdevices.get" call.
+// Exactly one of *ChromeOsDevice or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ChromeOsDevice.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ChromeosdevicesGetCall) Do(opts ...googleapi.CallOption) (*ChromeOsDevice, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
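+	// A 304 Not Modified below is surfaced as a *googleapi.Error rather
+	// than a decoded *ChromeOsDevice; callers pairing this with
+	// IfNoneMatch typically test for it (sketch, assumed identifiers):
+	//
+	//	dev, err := call.IfNoneMatch(prevEtag).Do()
+	//	if googleapi.IsNotModified(err) {
+	//		// the cached copy is still current
+	//	}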
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ChromeOsDevice{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a Chrome OS device's properties.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "httpMethod": "GET", + // "id": "directory.chromeosdevices.get", + // "parameterOrder": [ + // "customerId", + // "deviceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deviceId": { + // "description": "The unique ID of the device. The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/directory/v1/reference/chromeosdevices/list) method.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Determines whether the response contains the full list of properties or only a subset.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "response": { + // "$ref": "ChromeOsDevice" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos", + // "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + // ] + // } + +} + +// method id "directory.chromeosdevices.list": + +type ChromeosdevicesListCall struct { + s *Service + customerId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of Chrome OS devices within an +// account. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +func (r *ChromeosdevicesService) List(customerId string) *ChromeosdevicesListCall { + c := &ChromeosdevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + return c +} + +// IncludeChildOrgunits sets the optional parameter +// "includeChildOrgunits": Return devices from all child orgunits, as +// well as the specified org unit. If this is set to true, 'orgUnitPath' +// must be provided. 
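+//
+// For example (a sketch; the org unit path is a placeholder and "svc"
+// an assumed *Service):
+//
+//	devices, err := svc.Chromeosdevices.List("my_customer").
+//		OrgUnitPath("/Sales").
+//		IncludeChildOrgunits(true).
+//		Do()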
+func (c *ChromeosdevicesListCall) IncludeChildOrgunits(includeChildOrgunits bool) *ChromeosdevicesListCall {
+	c.urlParams_.Set("includeChildOrgunits", fmt.Sprint(includeChildOrgunits))
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return.
+func (c *ChromeosdevicesListCall) MaxResults(maxResults int64) *ChromeosdevicesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// OrderBy sets the optional parameter "orderBy": Device property to use
+// for sorting results.
+//
+// Possible values:
+//
+//	"annotatedLocation" - Chrome device location as annotated by the
+//
+// administrator.
+//
+//	"annotatedUser" - Chromebook user as annotated by administrator.
+//	"lastSync" - The date and time the Chrome device was last
+//
+// synchronized with the policy settings in the Admin console.
+//
+//	"notes" - Chrome device notes as annotated by the administrator.
+//	"serialNumber" - The Chrome device serial number entered when the
+//
+// device was enabled.
+//
+//	"status" - Chrome device status. For more information, see the
+//
+// [chromeosdevices](/admin-sdk/directory/v1/reference/chromeosdevices.html).
+func (c *ChromeosdevicesListCall) OrderBy(orderBy string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("orderBy", orderBy)
+	return c
+}
+
+// OrgUnitPath sets the optional parameter "orgUnitPath": The full path
+// of the organizational unit (minus the leading `/`) or its unique ID.
+func (c *ChromeosdevicesListCall) OrgUnitPath(orgUnitPath string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("orgUnitPath", orgUnitPath)
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": The `pageToken`
+// query parameter is used to request the next page of query results.
+// The follow-on request's `pageToken` query parameter is the
+// `nextPageToken` from your previous response.
+func (c *ChromeosdevicesListCall) PageToken(pageToken string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Projection sets the optional parameter "projection": Restrict
+// information returned to a set of selected fields.
+//
+// Possible values:
+//
+//	"BASIC" - Includes only the basic metadata fields (e.g., deviceId,
+//
+// serialNumber, status, and user)
+//
+//	"FULL" - Includes all metadata fields
+func (c *ChromeosdevicesListCall) Projection(projection string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// Query sets the optional parameter "query": Search string in the
+// format given at
+// https://developers.google.com/admin-sdk/directory/v1/list-query-operators
+func (c *ChromeosdevicesListCall) Query(query string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("query", query)
+	return c
+}
+
+// SortOrder sets the optional parameter "sortOrder": Whether to return
+// results in ascending or descending order. Must be used with the
+// `orderBy` parameter.
+//
+// Possible values:
+//
+//	"ASCENDING" - Ascending order.
+//	"DESCENDING" - Descending order.
+func (c *ChromeosdevicesListCall) SortOrder(sortOrder string) *ChromeosdevicesListCall {
+	c.urlParams_.Set("sortOrder", sortOrder)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
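+//
+// The query parameters above compose on a single call; a filtered,
+// sorted page might look like this (a sketch; the query string merely
+// illustrates the list-query-operators format):
+//
+//	page, err := svc.Chromeosdevices.List("my_customer").
+//		Query("status:provisioned").
+//		OrderBy("lastSync").
+//		SortOrder("DESCENDING").
+//		MaxResults(50).
+//		Do()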
+func (c *ChromeosdevicesListCall) Fields(s ...googleapi.Field) *ChromeosdevicesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ChromeosdevicesListCall) IfNoneMatch(entityTag string) *ChromeosdevicesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ChromeosdevicesListCall) Context(ctx context.Context) *ChromeosdevicesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ChromeosdevicesListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ChromeosdevicesListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customerId": c.customerId,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.chromeosdevices.list" call.
+// Exactly one of *ChromeOsDevices or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ChromeOsDevices.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ChromeosdevicesListCall) Do(opts ...googleapi.CallOption) (*ChromeOsDevices, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
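+	// Callers who want every page can use the Pages helper defined after
+	// this method instead of threading nextPageToken by hand (sketch,
+	// assumed identifiers):
+	//
+	//	err := svc.Chromeosdevices.List("my_customer").Pages(ctx,
+	//		func(page *ChromeOsDevices) error {
+	//			for _, d := range page.Chromeosdevices {
+	//				fmt.Println(d.DeviceId)
+	//			}
+	//			return nil
+	//		})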
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ChromeOsDevices{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of Chrome OS devices within an account.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos", + // "httpMethod": "GET", + // "id": "directory.chromeosdevices.list", + // "parameterOrder": [ + // "customerId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeChildOrgunits": { + // "description": "Return devices from all child orgunits, as well as the specified org unit. If this is set to true, 'orgUnitPath' must be provided.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "100", + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Device property to use for sorting results.", + // "enum": [ + // "annotatedLocation", + // "annotatedUser", + // "lastSync", + // "notes", + // "serialNumber", + // "status" + // ], + // "enumDescriptions": [ + // "Chrome device location as annotated by the administrator.", + // "Chromebook user as annotated by administrator.", + // "The date and time the Chrome device was last synchronized with the policy settings in the Admin console.", + // "Chrome device notes as annotated by the administrator.", + // "The Chrome device serial number entered when the device was enabled.", + // "Chrome device status. For more information, see the \u003ca [chromeosdevices](/admin-sdk/directory/v1/reference/chromeosdevices.html)." + // ], + // "location": "query", + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "The `pageToken` query parameter is used to request the next page of query results. 
The follow-on request's `pageToken` query parameter is the `nextPageToken` from your previous response.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "Search string in the format given at https://developers.google.com/admin-sdk/directory/v1/list-query-operators", + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Whether to return results in ascending or descending order. Must be used with the `orderBy` parameter.", + // "enum": [ + // "ASCENDING", + // "DESCENDING" + // ], + // "enumDescriptions": [ + // "Ascending order.", + // "Descending order." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos", + // "response": { + // "$ref": "ChromeOsDevices" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos", + // "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ChromeosdevicesListCall) Pages(ctx context.Context, f func(*ChromeOsDevices) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.chromeosdevices.moveDevicesToOu": + +type ChromeosdevicesMoveDevicesToOuCall struct { + s *Service + customerId string + chromeosmovedevicestoou *ChromeOsMoveDevicesToOu + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// MoveDevicesToOu: Moves or inserts multiple Chrome OS devices to an +// organizational unit. You can move up to 50 devices at once. +// +// - customerId: Immutable. ID of the Google Workspace account. +// - orgUnitPath: Full path of the target organizational unit or its ID. +func (r *ChromeosdevicesService) MoveDevicesToOu(customerId string, orgUnitPath string, chromeosmovedevicestoou *ChromeOsMoveDevicesToOu) *ChromeosdevicesMoveDevicesToOuCall { + c := &ChromeosdevicesMoveDevicesToOuCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.urlParams_.Set("orgUnitPath", orgUnitPath) + c.chromeosmovedevicestoou = chromeosmovedevicestoou + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChromeosdevicesMoveDevicesToOuCall) Fields(s ...googleapi.Field) *ChromeosdevicesMoveDevicesToOuCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
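+//
+// A bulk-move sketch (device IDs are placeholders; per the method
+// description above, a single call moves at most 50 devices):
+//
+//	err := svc.Chromeosdevices.MoveDevicesToOu("my_customer", "/Deprovisioned",
+//		&ChromeOsMoveDevicesToOu{DeviceIds: []string{"device-id-1", "device-id-2"}},
+//	).Do()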
+func (c *ChromeosdevicesMoveDevicesToOuCall) Context(ctx context.Context) *ChromeosdevicesMoveDevicesToOuCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChromeosdevicesMoveDevicesToOuCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChromeosdevicesMoveDevicesToOuCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosmovedevicestoou) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/moveDevicesToOu") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.chromeosdevices.moveDevicesToOu" call. +func (c *ChromeosdevicesMoveDevicesToOuCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Moves or inserts multiple Chrome OS devices to an organizational unit. You can move up to 50 devices at once.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/moveDevicesToOu", + // "httpMethod": "POST", + // "id": "directory.chromeosdevices.moveDevicesToOu", + // "parameterOrder": [ + // "customerId", + // "orgUnitPath" + // ], + // "parameters": { + // "customerId": { + // "description": "Immutable. ID of the Google Workspace account", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "Full path of the target organizational unit or its ID", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/moveDevicesToOu", + // "request": { + // "$ref": "ChromeOsMoveDevicesToOu" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos" + // ] + // } + +} + +// method id "directory.chromeosdevices.patch": + +type ChromeosdevicesPatchCall struct { + s *Service + customerId string + deviceId string + chromeosdevice *ChromeOsDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a device's updatable properties, such as +// `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or +// `annotatedAssetId`. This method supports patch semantics +// (/admin-sdk/directory/v1/guides/performance#patch). +// +// - customerId: The unique ID for the customer's Google Workspace +// account. 
As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - deviceId: The unique ID of the device. The `deviceId`s are returned +// in the response from the chromeosdevices.list +// (/admin-sdk/v1/reference/chromeosdevices/list) method. +func (r *ChromeosdevicesService) Patch(customerId string, deviceId string, chromeosdevice *ChromeOsDevice) *ChromeosdevicesPatchCall { + c := &ChromeosdevicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.deviceId = deviceId + c.chromeosdevice = chromeosdevice + return c +} + +// Projection sets the optional parameter "projection": Restrict +// information returned to a set of selected fields. +// +// Possible values: +// +// "BASIC" - Includes only the basic metadata fields (e.g., deviceId, +// +// serialNumber, status, and user) +// +// "FULL" - Includes all metadata fields +func (c *ChromeosdevicesPatchCall) Projection(projection string) *ChromeosdevicesPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChromeosdevicesPatchCall) Fields(s ...googleapi.Field) *ChromeosdevicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChromeosdevicesPatchCall) Context(ctx context.Context) *ChromeosdevicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChromeosdevicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChromeosdevicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdevice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "deviceId": c.deviceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.chromeosdevices.patch" call. +// Exactly one of *ChromeOsDevice or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ChromeOsDevice.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
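+//
+// Because empty values are omitted from the request body, clearing a
+// field through Patch requires ForceSendFields, as described on the
+// struct types above (a sketch; values are placeholders):
+//
+//	updated, err := svc.Chromeosdevices.Patch("my_customer", deviceID,
+//		&ChromeOsDevice{
+//			AnnotatedAssetId: "",
+//			ForceSendFields:  []string{"AnnotatedAssetId"}, // send the empty value
+//		}).Do()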
+func (c *ChromeosdevicesPatchCall) Do(opts ...googleapi.CallOption) (*ChromeOsDevice, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ChromeOsDevice{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a device's updatable properties, such as `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or `annotatedAssetId`. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "httpMethod": "PATCH", + // "id": "directory.chromeosdevices.patch", + // "parameterOrder": [ + // "customerId", + // "deviceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deviceId": { + // "description": "The unique ID of the device. The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/v1/reference/chromeosdevices/list) method.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "request": { + // "$ref": "ChromeOsDevice" + // }, + // "response": { + // "$ref": "ChromeOsDevice" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos" + // ] + // } + +} + +// method id "directory.chromeosdevices.update": + +type ChromeosdevicesUpdateCall struct { + s *Service + customerId string + deviceId string + chromeosdevice *ChromeOsDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a device's updatable properties, such as +// `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or +// `annotatedAssetId`. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - deviceId: The unique ID of the device. 
The `deviceId`s are returned +// in the response from the chromeosdevices.list +// (/admin-sdk/v1/reference/chromeosdevices/list) method. +func (r *ChromeosdevicesService) Update(customerId string, deviceId string, chromeosdevice *ChromeOsDevice) *ChromeosdevicesUpdateCall { + c := &ChromeosdevicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.deviceId = deviceId + c.chromeosdevice = chromeosdevice + return c +} + +// Projection sets the optional parameter "projection": Restrict +// information returned to a set of selected fields. +// +// Possible values: +// +// "BASIC" - Includes only the basic metadata fields (e.g., deviceId, +// +// serialNumber, status, and user) +// +// "FULL" - Includes all metadata fields +func (c *ChromeosdevicesUpdateCall) Projection(projection string) *ChromeosdevicesUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChromeosdevicesUpdateCall) Fields(s ...googleapi.Field) *ChromeosdevicesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChromeosdevicesUpdateCall) Context(ctx context.Context) *ChromeosdevicesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChromeosdevicesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChromeosdevicesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.chromeosdevice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "deviceId": c.deviceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.chromeosdevices.update" call. +// Exactly one of *ChromeOsDevice or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ChromeOsDevice.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChromeosdevicesUpdateCall) Do(opts ...googleapi.CallOption) (*ChromeOsDevice, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
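+	// ServerResponse is populated below before the body is decoded, so a
+	// successfully returned *ChromeOsDevice also carries the HTTP status
+	// code and headers of the response that produced it.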
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ChromeOsDevice{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a device's updatable properties, such as `annotatedUser`, `annotatedLocation`, `notes`, `orgUnitPath`, or `annotatedAssetId`.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "httpMethod": "PUT", + // "id": "directory.chromeosdevices.update", + // "parameterOrder": [ + // "customerId", + // "deviceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deviceId": { + // "description": "The unique ID of the device. The `deviceId`s are returned in the response from the [chromeosdevices.list](/admin-sdk/v1/reference/chromeosdevices/list) method.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, serialNumber, status, and user)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}", + // "request": { + // "$ref": "ChromeOsDevice" + // }, + // "response": { + // "$ref": "ChromeOsDevice" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos" + // ] + // } + +} + +// method id "admin.customer.devices.chromeos.issueCommand": + +type CustomerDevicesChromeosIssueCommandCall struct { + s *Service + customerId string + deviceId string + directorychromeosdevicesissuecommandrequest *DirectoryChromeosdevicesIssueCommandRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// IssueCommand: Issues a command for the device to execute. +// +// - customerId: Immutable. ID of the Google Workspace account. +// - deviceId: Immutable. ID of Chrome OS Device. +func (r *CustomerDevicesChromeosService) IssueCommand(customerId string, deviceId string, directorychromeosdevicesissuecommandrequest *DirectoryChromeosdevicesIssueCommandRequest) *CustomerDevicesChromeosIssueCommandCall { + c := &CustomerDevicesChromeosIssueCommandCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.deviceId = deviceId + c.directorychromeosdevicesissuecommandrequest = directorychromeosdevicesissuecommandrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomerDevicesChromeosIssueCommandCall) Fields(s ...googleapi.Field) *CustomerDevicesChromeosIssueCommandCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomerDevicesChromeosIssueCommandCall) Context(ctx context.Context) *CustomerDevicesChromeosIssueCommandCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomerDevicesChromeosIssueCommandCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomerDevicesChromeosIssueCommandCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.directorychromeosdevicesissuecommandrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}:issueCommand") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "deviceId": c.deviceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customer.devices.chromeos.issueCommand" call. +// Exactly one of *DirectoryChromeosdevicesIssueCommandResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *DirectoryChromeosdevicesIssueCommandResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *CustomerDevicesChromeosIssueCommandCall) Do(opts ...googleapi.CallOption) (*DirectoryChromeosdevicesIssueCommandResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
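+ // For reference, a minimal invocation of this call type. It assumes svc is
+ // a *Service and that "REBOOT" is a valid CommandType value for
+ // DirectoryChromeosdevicesIssueCommandRequest (an Admin SDK enum value not
+ // verified against this vendored snapshot):
+ //
+ //	resp, err := svc.Customer.Devices.Chromeos.IssueCommand(customerID, deviceID,
+ //		&DirectoryChromeosdevicesIssueCommandRequest{CommandType: "REBOOT"}).Do()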
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DirectoryChromeosdevicesIssueCommandResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Issues a command for the device to execute.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}:issueCommand", + // "httpMethod": "POST", + // "id": "admin.customer.devices.chromeos.issueCommand", + // "parameterOrder": [ + // "customerId", + // "deviceId" + // ], + // "parameters": { + // "customerId": { + // "description": "Immutable. ID of the Google Workspace account.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deviceId": { + // "description": "Immutable. ID of Chrome OS Device.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}:issueCommand", + // "request": { + // "$ref": "DirectoryChromeosdevicesIssueCommandRequest" + // }, + // "response": { + // "$ref": "DirectoryChromeosdevicesIssueCommandResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos" + // ] + // } + +} + +// method id "admin.customer.devices.chromeos.commands.get": + +type CustomerDevicesChromeosCommandsGetCall struct { + s *Service + customerId string + deviceId string + commandId int64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets command data for a specific command issued to the device. +// +// - commandId: Immutable. ID of Chrome OS Device Command. +// - customerId: Immutable. ID of the Google Workspace account. +// - deviceId: Immutable. ID of Chrome OS Device. +func (r *CustomerDevicesChromeosCommandsService) Get(customerId string, deviceId string, commandId int64) *CustomerDevicesChromeosCommandsGetCall { + c := &CustomerDevicesChromeosCommandsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.deviceId = deviceId + c.commandId = commandId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomerDevicesChromeosCommandsGetCall) Fields(s ...googleapi.Field) *CustomerDevicesChromeosCommandsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
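+//
+// A minimal sketch of the conditional-fetch pattern this enables; prevETag is
+// a hypothetical value saved from an earlier response:
+//
+//	cmd, err := call.IfNoneMatch(prevETag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The previously fetched DirectoryChromeosdevicesCommand is still current.
+//	}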
+func (c *CustomerDevicesChromeosCommandsGetCall) IfNoneMatch(entityTag string) *CustomerDevicesChromeosCommandsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomerDevicesChromeosCommandsGetCall) Context(ctx context.Context) *CustomerDevicesChromeosCommandsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomerDevicesChromeosCommandsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomerDevicesChromeosCommandsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}/commands/{commandId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "deviceId": c.deviceId, + "commandId": strconv.FormatInt(c.commandId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customer.devices.chromeos.commands.get" call. +// Exactly one of *DirectoryChromeosdevicesCommand or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *DirectoryChromeosdevicesCommand.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomerDevicesChromeosCommandsGetCall) Do(opts ...googleapi.CallOption) (*DirectoryChromeosdevicesCommand, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
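+ // gensupport.SetOptions above folds any googleapi.CallOption arguments
+ // (for example googleapi.QuotaUser) into the query parameters before the
+ // request is built.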
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DirectoryChromeosdevicesCommand{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets command data for a specific command issued to the device.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}/commands/{commandId}", + // "httpMethod": "GET", + // "id": "admin.customer.devices.chromeos.commands.get", + // "parameterOrder": [ + // "customerId", + // "deviceId", + // "commandId" + // ], + // "parameters": { + // "commandId": { + // "description": "Immutable. ID of Chrome OS Device Command.", + // "format": "int64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customerId": { + // "description": "Immutable. ID of the Google Workspace account.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "deviceId": { + // "description": "Immutable. ID of Chrome OS Device.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/chromeos/{deviceId}/commands/{commandId}", + // "response": { + // "$ref": "DirectoryChromeosdevicesCommand" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.chromeos", + // "https://www.googleapis.com/auth/admin.directory.device.chromeos.readonly" + // ] + // } + +} + +// method id "directory.customers.get": + +type CustomersGetCall struct { + s *Service + customerKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a customer. +// +// - customerKey: Id of the customer to be retrieved. +func (r *CustomersService) Get(customerKey string) *CustomersGetCall { + c := &CustomersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerKey = customerKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersGetCall) Fields(s ...googleapi.Field) *CustomersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *CustomersGetCall) IfNoneMatch(entityTag string) *CustomersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
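+//
+// For the customers Get call defined above, a minimal sketch; srv is assumed
+// to be a *Service, and "my_customer" is the documented alias for the
+// caller's own account:
+//
+//	cust, err := srv.Customers.Get("my_customer").Do()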
+func (c *CustomersGetCall) Context(ctx context.Context) *CustomersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customers/{customerKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerKey": c.customerKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.customers.get" call. +// Exactly one of *Customer or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Customer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersGetCall) Do(opts ...googleapi.CallOption) (*Customer, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Customer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a customer.", + // "flatPath": "admin/directory/v1/customers/{customerKey}", + // "httpMethod": "GET", + // "id": "directory.customers.get", + // "parameterOrder": [ + // "customerKey" + // ], + // "parameters": { + // "customerKey": { + // "description": "Id of the customer to be retrieved", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customers/{customerKey}", + // "response": { + // "$ref": "Customer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.customer", + // "https://www.googleapis.com/auth/admin.directory.customer.readonly" + // ] + // } + +} + +// method id "directory.customers.patch": + +type CustomersPatchCall struct { + s *Service + customerKey string + customer *Customer + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a customer. +// +// - customerKey: Id of the customer to be updated. 
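+//
+// A minimal sketch; srv is assumed to be a *Service, and AlternateEmail is
+// one of the Customer fields defined in this package:
+//
+//	cust, err := srv.Customers.Patch("my_customer",
+//		&Customer{AlternateEmail: "admin-alt@example.com"}).Do()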
+func (r *CustomersService) Patch(customerKey string, customer *Customer) *CustomersPatchCall { + c := &CustomersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerKey = customerKey + c.customer = customer + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersPatchCall) Fields(s ...googleapi.Field) *CustomersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersPatchCall) Context(ctx context.Context) *CustomersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customers/{customerKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerKey": c.customerKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.customers.patch" call. +// Exactly one of *Customer or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Customer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersPatchCall) Do(opts ...googleapi.CallOption) (*Customer, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
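+ // Patch sends an HTTP PATCH to the same customers/{customerKey} path that
+ // Update addresses with PUT; see doRequest above and the Update call below.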
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Customer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a customer.", + // "flatPath": "admin/directory/v1/customers/{customerKey}", + // "httpMethod": "PATCH", + // "id": "directory.customers.patch", + // "parameterOrder": [ + // "customerKey" + // ], + // "parameters": { + // "customerKey": { + // "description": "Id of the customer to be updated", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customers/{customerKey}", + // "request": { + // "$ref": "Customer" + // }, + // "response": { + // "$ref": "Customer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.customer" + // ] + // } + +} + +// method id "directory.customers.update": + +type CustomersUpdateCall struct { + s *Service + customerKey string + customer *Customer + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a customer. +// +// - customerKey: Id of the customer to be updated. +func (r *CustomersService) Update(customerKey string, customer *Customer) *CustomersUpdateCall { + c := &CustomersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerKey = customerKey + c.customer = customer + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersUpdateCall) Fields(s ...googleapi.Field) *CustomersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersUpdateCall) Context(ctx context.Context) *CustomersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.customer) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customers/{customerKey}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerKey": c.customerKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.customers.update" call. +// Exactly one of *Customer or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Customer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersUpdateCall) Do(opts ...googleapi.CallOption) (*Customer, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Customer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a customer.", + // "flatPath": "admin/directory/v1/customers/{customerKey}", + // "httpMethod": "PUT", + // "id": "directory.customers.update", + // "parameterOrder": [ + // "customerKey" + // ], + // "parameters": { + // "customerKey": { + // "description": "Id of the customer to be updated", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customers/{customerKey}", + // "request": { + // "$ref": "Customer" + // }, + // "response": { + // "$ref": "Customer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.customer" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.batchCreatePrintServers": + +type CustomersChromePrintServersBatchCreatePrintServersCall struct { + s *Service + parent string + batchcreateprintserversrequest *BatchCreatePrintServersRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchCreatePrintServers: Creates multiple print servers. +// +// - parent: The unique ID +// (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) +// of the customer's Google Workspace account. Format: +// `customers/{id}`. +func (r *CustomersChromePrintServersService) BatchCreatePrintServers(parent string, batchcreateprintserversrequest *BatchCreatePrintServersRequest) *CustomersChromePrintServersBatchCreatePrintServersCall { + c := &CustomersChromePrintServersBatchCreatePrintServersCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batchcreateprintserversrequest = batchcreateprintserversrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
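+//
+// For the BatchCreatePrintServers call defined above, an illustrative sketch;
+// the request shapes are assumed from the Admin SDK discovery document and
+// all IDs are placeholders:
+//
+//	resp, err := srv.Customers.Chrome.PrintServers.BatchCreatePrintServers(
+//		"customers/C012abc3d",
+//		&BatchCreatePrintServersRequest{Requests: []*CreatePrintServerRequest{{
+//			Parent:      "customers/C012abc3d",
+//			PrintServer: &PrintServer{DisplayName: "floor-3"},
+//		}}}).Do()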
+func (c *CustomersChromePrintServersBatchCreatePrintServersCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersBatchCreatePrintServersCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersBatchCreatePrintServersCall) Context(ctx context.Context) *CustomersChromePrintServersBatchCreatePrintServersCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintServersBatchCreatePrintServersCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersBatchCreatePrintServersCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchcreateprintserversrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printServers:batchCreatePrintServers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.batchCreatePrintServers" call. +// Exactly one of *BatchCreatePrintServersResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BatchCreatePrintServersResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomersChromePrintServersBatchCreatePrintServersCall) Do(opts ...googleapi.CallOption) (*BatchCreatePrintServersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BatchCreatePrintServersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates multiple print servers.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers:batchCreatePrintServers", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printServers.batchCreatePrintServers", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printServers:batchCreatePrintServers", + // "request": { + // "$ref": "BatchCreatePrintServersRequest" + // }, + // "response": { + // "$ref": "BatchCreatePrintServersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.batchDeletePrintServers": + +type CustomersChromePrintServersBatchDeletePrintServersCall struct { + s *Service + parent string + batchdeleteprintserversrequest *BatchDeletePrintServersRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchDeletePrintServers: Deletes multiple print servers. +// +// - parent: The unique ID +// (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) +// of the customer's Google Workspace account. Format: +// `customers/{customer.id}`. +func (r *CustomersChromePrintServersService) BatchDeletePrintServers(parent string, batchdeleteprintserversrequest *BatchDeletePrintServersRequest) *CustomersChromePrintServersBatchDeletePrintServersCall { + c := &CustomersChromePrintServersBatchDeletePrintServersCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batchdeleteprintserversrequest = batchdeleteprintserversrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersBatchDeletePrintServersCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersBatchDeletePrintServersCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersBatchDeletePrintServersCall) Context(ctx context.Context) *CustomersChromePrintServersBatchDeletePrintServersCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
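+//
+// For the BatchDeletePrintServers call defined above, a minimal sketch; the
+// PrintServerIds field is assumed from the Admin SDK request schema and the
+// IDs are placeholders:
+//
+//	resp, err := srv.Customers.Chrome.PrintServers.BatchDeletePrintServers(
+//		"customers/C012abc3d",
+//		&BatchDeletePrintServersRequest{PrintServerIds: []string{"ps-1", "ps-2"}}).Do()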
+func (c *CustomersChromePrintServersBatchDeletePrintServersCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersBatchDeletePrintServersCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchdeleteprintserversrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printServers:batchDeletePrintServers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.batchDeletePrintServers" call. +// Exactly one of *BatchDeletePrintServersResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BatchDeletePrintServersResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomersChromePrintServersBatchDeletePrintServersCall) Do(opts ...googleapi.CallOption) (*BatchDeletePrintServersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BatchDeletePrintServersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes multiple print servers.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers:batchDeletePrintServers", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printServers.batchDeletePrintServers", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. 
Format: `customers/{customer.id}`", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printServers:batchDeletePrintServers", + // "request": { + // "$ref": "BatchDeletePrintServersRequest" + // }, + // "response": { + // "$ref": "BatchDeletePrintServersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.create": + +type CustomersChromePrintServersCreateCall struct { + s *Service + parent string + printserver *PrintServer + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a print server. +// +// - parent: The unique ID +// (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) +// of the customer's Google Workspace account. Format: +// `customers/{id}`. +func (r *CustomersChromePrintServersService) Create(parent string, printserver *PrintServer) *CustomersChromePrintServersCreateCall { + c := &CustomersChromePrintServersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.printserver = printserver + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersCreateCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersCreateCall) Context(ctx context.Context) *CustomersChromePrintServersCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintServersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.printserver) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printServers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.create" call. +// Exactly one of *PrintServer or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PrintServer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersChromePrintServersCreateCall) Do(opts ...googleapi.CallOption) (*PrintServer, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &PrintServer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a print server.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printServers.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printServers", + // "request": { + // "$ref": "PrintServer" + // }, + // "response": { + // "$ref": "PrintServer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.delete": + +type CustomersChromePrintServersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a print server. +// +// - name: The name of the print server to be deleted. Format: +// `customers/{customer.id}/chrome/printServers/{print_server.id}`. +func (r *CustomersChromePrintServersService) Delete(name string) *CustomersChromePrintServersDeleteCall { + c := &CustomersChromePrintServersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersDeleteCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersDeleteCall) Context(ctx context.Context) *CustomersChromePrintServersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
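+//
+// For the print server Create call defined above, a minimal sketch; the
+// PrintServer fields used are assumed from this API's resource schema and the
+// values are placeholders:
+//
+//	ps, err := srv.Customers.Chrome.PrintServers.Create("customers/C012abc3d",
+//		&PrintServer{DisplayName: "floor-3", Uri: "ipp://print.example.com:631"}).Do()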
+func (c *CustomersChromePrintServersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CustomersChromePrintServersDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a print server.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + // "httpMethod": "DELETE", + // "id": "admin.customers.chrome.printServers.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the print server to be deleted. Format: `customers/{customer.id}/chrome/printServers/{print_server.id}`", + // "location": "path", + // "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.get": + +type CustomersChromePrintServersGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a print server's configuration. +// +// - name: The unique ID +// (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) +// of the customer's Google Workspace account. Format: +// `customers/{id}`. 
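+//
+// A minimal sketch; the resource name is a placeholder:
+//
+//	ps, err := srv.Customers.Chrome.PrintServers.Get(
+//		"customers/C012abc3d/chrome/printServers/ps-1").Do()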
+func (r *CustomersChromePrintServersService) Get(name string) *CustomersChromePrintServersGetCall { + c := &CustomersChromePrintServersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersGetCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *CustomersChromePrintServersGetCall) IfNoneMatch(entityTag string) *CustomersChromePrintServersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersGetCall) Context(ctx context.Context) *CustomersChromePrintServersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintServersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.get" call. +// Exactly one of *PrintServer or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PrintServer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersChromePrintServersGetCall) Do(opts ...googleapi.CallOption) (*PrintServer, error) { + gensupport.SetOptions(c.urlParams_, opts...)
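+ // In the body below, googleapi.CheckResponse reads the response body for
+ // non-2xx statuses to build the returned *googleapi.Error, and the deferred
+ // googleapi.CloseBody releases the connection when Do returns.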
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &PrintServer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a print server's configuration.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + // "httpMethod": "GET", + // "id": "admin.customers.chrome.printServers.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. Format: `customers/{id}`", + // "location": "path", + // "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+name}", + // "response": { + // "$ref": "PrintServer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers", + // "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + // ] + // } + +} + +// method id "admin.customers.chrome.printServers.list": + +type CustomersChromePrintServersListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists print server configurations. +// +// - parent: The unique ID +// (https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) +// of the customer's Google Workspace account. Format: +// `customers/{id}`. +func (r *CustomersChromePrintServersService) List(parent string) *CustomersChromePrintServersListCall { + c := &CustomersChromePrintServersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Search query in Common +// Expression Language syntax (https://github.com/google/cel-spec). +// Supported filters are `display_name`, `description`, and `uri`. +// Example: `printServer.displayName=='marketing-queue'`. +func (c *CustomersChromePrintServersListCall) Filter(filter string) *CustomersChromePrintServersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sort order for +// results. Supported values are `display_name`, `description`, or +// `create_time`. Default order is ascending, but descending order can +// be returned by appending "desc" to the `order_by` field. For +// instance, `orderBy=='description desc'` returns the print servers +// sorted by description in descending order. +func (c *CustomersChromePrintServersListCall) OrderBy(orderBy string) *CustomersChromePrintServersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// OrgUnitId sets the optional parameter "orgUnitId": If `org_unit_id` +// is present in the request, only print servers owned or inherited by +// the organizational unit (OU) are returned. 
If the `PrintServer` +// resource's `org_unit_id` matches the one in the request, the OU owns +// the server. If `org_unit_id` is not specified in the request, all +// print servers are returned or filtered against. +func (c *CustomersChromePrintServersListCall) OrgUnitId(orgUnitId string) *CustomersChromePrintServersListCall { + c.urlParams_.Set("orgUnitId", orgUnitId) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of objects to return (default `100`, max `100`). The service might +// return fewer than this value. +func (c *CustomersChromePrintServersListCall) PageSize(pageSize int64) *CustomersChromePrintServersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A generated token +// to paginate results (the `next_page_token` from a previous call). +func (c *CustomersChromePrintServersListCall) PageToken(pageToken string) *CustomersChromePrintServersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersListCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *CustomersChromePrintServersListCall) IfNoneMatch(entityTag string) *CustomersChromePrintServersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersListCall) Context(ctx context.Context) *CustomersChromePrintServersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintServersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printServers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.list" call. +// Exactly one of *ListPrintServersResponse or error will be non-nil. +// Any non-2xx status code is an error.
Response headers are in either +// *ListPrintServersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomersChromePrintServersListCall) Do(opts ...googleapi.CallOption) (*ListPrintServersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListPrintServersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists print server configurations.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers", + // "httpMethod": "GET", + // "id": "admin.customers.chrome.printServers.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "Search query in [Common Expression Language syntax](https://github.com/google/cel-spec). Supported filters are `display_name`, `description`, and `uri`. Example: `printServer.displayName=='marketing-queue'`.", + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "Sort order for results. Supported values are `display_name`, `description`, or `create_time`. Default order is ascending, but descending order can be returned by appending \"desc\" to the `order_by` field. For instance, `orderBy=='description desc'` returns the print servers sorted by description in descending order.", + // "location": "query", + // "type": "string" + // }, + // "orgUnitId": { + // "description": "If `org_unit_id` is present in the request, only print servers owned or inherited by the organizational unit (OU) are returned. If the `PrintServer` resource's `org_unit_id` matches the one in the request, the OU owns the server. If `org_unit_id` is not specified in the request, all print servers are returned or filtered against.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of objects to return (default `100`, max `100`). The service might return fewer than this value.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A generated token to paginate results (the `next_page_token` from a previous call).", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The [unique ID](https://developers.google.com/admin-sdk/directory/reference/rest/v1/customers) of the customer's Google Workspace account. 
Format: `customers/{id}`", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printServers", + // "response": { + // "$ref": "ListPrintServersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers", + // "https://www.googleapis.com/auth/admin.chrome.printers.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *CustomersChromePrintServersListCall) Pages(ctx context.Context, f func(*ListPrintServersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "admin.customers.chrome.printServers.patch": + +type CustomersChromePrintServersPatchCall struct { + s *Service + name string + printserver *PrintServer + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a print server's configuration. +// +// - name: Immutable. Resource name of the print server. Leave empty +// when creating. Format: +// `customers/{customer.id}/printServers/{print_server.id}`. +func (r *CustomersChromePrintServersService) Patch(name string, printserver *PrintServer) *CustomersChromePrintServersPatchCall { + c := &CustomersChromePrintServersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.printserver = printserver + return c +} + +// UpdateMask sets the optional parameter "updateMask": The list of +// fields to update. Some fields are read-only and cannot be updated. +// Values for unspecified fields are patched. +func (c *CustomersChromePrintServersPatchCall) UpdateMask(updateMask string) *CustomersChromePrintServersPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintServersPatchCall) Fields(s ...googleapi.Field) *CustomersChromePrintServersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintServersPatchCall) Context(ctx context.Context) *CustomersChromePrintServersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
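+//
+// Editor's sketch (not generated code): a typical patch flow that also sets
+// a custom header before executing. `srv` is assumed to be an authenticated
+// *Service; the resource name and the PrintServer field are placeholders
+// taken from the resource schema.
+//
+//	call := srv.Customers.Chrome.PrintServers.Patch(
+//		"customers/C123/chrome/printServers/ps-1",
+//		&PrintServer{DisplayName: "floor-2"},
+//	).UpdateMask("displayName")
+//	call.Header().Set("X-Debug-Id", "example") // arbitrary extra header
+//	ps, err := call.Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = ps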
+func (c *CustomersChromePrintServersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintServersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.printserver) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printServers.patch" call. +// Exactly one of *PrintServer or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *PrintServer.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *CustomersChromePrintServersPatchCall) Do(opts ...googleapi.CallOption) (*PrintServer, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &PrintServer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a print server's configuration.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printServers/{printServersId}", + // "httpMethod": "PATCH", + // "id": "admin.customers.chrome.printServers.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Immutable. Resource name of the print server. Leave empty when creating. Format: `customers/{customer.id}/printServers/{print_server.id}`", + // "location": "path", + // "pattern": "^customers/[^/]+/chrome/printServers/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "The list of fields to update. Some fields are read-only and cannot be updated. 
Values for unspecified fields are patched.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+name}", + // "request": { + // "$ref": "PrintServer" + // }, + // "response": { + // "$ref": "PrintServer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printers.batchCreatePrinters": + +type CustomersChromePrintersBatchCreatePrintersCall struct { + s *Service + parent string + batchcreateprintersrequest *BatchCreatePrintersRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchCreatePrinters: Creates printers under given Organization Unit. +// +// - parent: The name of the customer. Format: customers/{customer_id}. +func (r *CustomersChromePrintersService) BatchCreatePrinters(parent string, batchcreateprintersrequest *BatchCreatePrintersRequest) *CustomersChromePrintersBatchCreatePrintersCall { + c := &CustomersChromePrintersBatchCreatePrintersCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batchcreateprintersrequest = batchcreateprintersrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintersBatchCreatePrintersCall) Fields(s ...googleapi.Field) *CustomersChromePrintersBatchCreatePrintersCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintersBatchCreatePrintersCall) Context(ctx context.Context) *CustomersChromePrintersBatchCreatePrintersCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintersBatchCreatePrintersCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintersBatchCreatePrintersCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchcreateprintersrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printers:batchCreatePrinters") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printers.batchCreatePrinters" call. +// Exactly one of *BatchCreatePrintersResponse or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *BatchCreatePrintersResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomersChromePrintersBatchCreatePrintersCall) Do(opts ...googleapi.CallOption) (*BatchCreatePrintersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BatchCreatePrintersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates printers under given Organization Unit.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:batchCreatePrinters", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printers.batchCreatePrinters", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the customer. Format: customers/{customer_id}", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printers:batchCreatePrinters", + // "request": { + // "$ref": "BatchCreatePrintersRequest" + // }, + // "response": { + // "$ref": "BatchCreatePrintersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printers.batchDeletePrinters": + +type CustomersChromePrintersBatchDeletePrintersCall struct { + s *Service + parent string + batchdeleteprintersrequest *BatchDeletePrintersRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchDeletePrinters: Deletes printers in batch. +// +// - parent: The name of the customer. Format: customers/{customer_id}. +func (r *CustomersChromePrintersService) BatchDeletePrinters(parent string, batchdeleteprintersrequest *BatchDeletePrintersRequest) *CustomersChromePrintersBatchDeletePrintersCall { + c := &CustomersChromePrintersBatchDeletePrintersCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.batchdeleteprintersrequest = batchdeleteprintersrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintersBatchDeletePrintersCall) Fields(s ...googleapi.Field) *CustomersChromePrintersBatchDeletePrintersCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
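+//
+// Editor's sketch (illustrative only): bounding the batch delete with a
+// timeout via Context. `srv` is an assumed authenticated *Service, and the
+// PrinterIds field name and printer IDs are assumptions from the request
+// schema, not verified against this file.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	resp, err := srv.Customers.Chrome.Printers.BatchDeletePrinters(
+//		"customers/C123",
+//		&BatchDeletePrintersRequest{PrinterIds: []string{"p1", "p2"}},
+//	).Context(ctx).Do()
+//	_, _ = resp, err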
+func (c *CustomersChromePrintersBatchDeletePrintersCall) Context(ctx context.Context) *CustomersChromePrintersBatchDeletePrintersCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintersBatchDeletePrintersCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintersBatchDeletePrintersCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.batchdeleteprintersrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printers:batchDeletePrinters") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printers.batchDeletePrinters" call. +// Exactly one of *BatchDeletePrintersResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BatchDeletePrintersResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *CustomersChromePrintersBatchDeletePrintersCall) Do(opts ...googleapi.CallOption) (*BatchDeletePrintersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BatchDeletePrintersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes printers in batch.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:batchDeletePrinters", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printers.batchDeletePrinters", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the customer. 
Format: customers/{customer_id}", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printers:batchDeletePrinters", + // "request": { + // "$ref": "BatchDeletePrintersRequest" + // }, + // "response": { + // "$ref": "BatchDeletePrintersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printers.create": + +type CustomersChromePrintersCreateCall struct { + s *Service + parent string + printer *Printer + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a printer under given Organization Unit. +// +// - parent: The name of the customer. Format: customers/{customer_id}. +func (r *CustomersChromePrintersService) Create(parent string, printer *Printer) *CustomersChromePrintersCreateCall { + c := &CustomersChromePrintersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.printer = printer + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintersCreateCall) Fields(s ...googleapi.Field) *CustomersChromePrintersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintersCreateCall) Context(ctx context.Context) *CustomersChromePrintersCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *CustomersChromePrintersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintersCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.printer) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printers.create" call. +// Exactly one of *Printer or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Printer.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
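+//
+// Editor's sketch of a minimal create-and-execute flow. `srv` is an assumed
+// authenticated *Service; the Printer field values are placeholders based on
+// the resource schema.
+//
+//	printer, err := srv.Customers.Chrome.Printers.Create("customers/C123", &Printer{
+//		DisplayName: "front-desk",
+//		Uri:         "ipp://192.168.1.50",
+//		OrgUnitId:   "ou-42",
+//	}).Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = printer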
+func (c *CustomersChromePrintersCreateCall) Do(opts ...googleapi.CallOption) (*Printer, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Printer{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a printer under given Organization Unit.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers", + // "httpMethod": "POST", + // "id": "admin.customers.chrome.printers.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the customer. Format: customers/{customer_id}", + // "location": "path", + // "pattern": "^customers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+parent}/chrome/printers", + // "request": { + // "$ref": "Printer" + // }, + // "response": { + // "$ref": "Printer" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printers.delete": + +type CustomersChromePrintersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a `Printer`. +// +// - name: The name of the printer to be updated. Format: +// customers/{customer_id}/chrome/printers/{printer_id}. +func (r *CustomersChromePrintersService) Delete(name string) *CustomersChromePrintersDeleteCall { + c := &CustomersChromePrintersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *CustomersChromePrintersDeleteCall) Fields(s ...googleapi.Field) *CustomersChromePrintersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *CustomersChromePrintersDeleteCall) Context(ctx context.Context) *CustomersChromePrintersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *CustomersChromePrintersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *CustomersChromePrintersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "admin.customers.chrome.printers.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *CustomersChromePrintersDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a `Printer`.", + // "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}", + // "httpMethod": "DELETE", + // "id": "admin.customers.chrome.printers.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the printer to be updated. Format: customers/{customer_id}/chrome/printers/{printer_id}", + // "location": "path", + // "pattern": "^customers/[^/]+/chrome/printers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.chrome.printers" + // ] + // } + +} + +// method id "admin.customers.chrome.printers.get": + +type CustomersChromePrintersGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a `Printer` resource (printer's config). +// +// - name: The name of the printer to retrieve. Format: +// customers/{customer_id}/chrome/printers/{printer_id}. 
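+//
+// Editor's sketch (hypothetical IDs; `srv` is an assumed authenticated
+// *Service):
+//
+//	p, err := srv.Customers.Chrome.Printers.Get(
+//		"customers/C123/chrome/printers/p1",
+//	).Do()
+//	_, _ = p, err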
+func (r *CustomersChromePrintersService) Get(name string) *CustomersChromePrintersGetCall {
+	c := &CustomersChromePrintersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *CustomersChromePrintersGetCall) Fields(s ...googleapi.Field) *CustomersChromePrintersGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *CustomersChromePrintersGetCall) IfNoneMatch(entityTag string) *CustomersChromePrintersGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CustomersChromePrintersGetCall) Context(ctx context.Context) *CustomersChromePrintersGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *CustomersChromePrintersGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *CustomersChromePrintersGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"name": c.name,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "admin.customers.chrome.printers.get" call.
+// Exactly one of *Printer or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Printer.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *CustomersChromePrintersGetCall) Do(opts ...googleapi.CallOption) (*Printer, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Printer{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns a `Printer` resource (printer's config).",
+	//   "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}",
+	//   "httpMethod": "GET",
+	//   "id": "admin.customers.chrome.printers.get",
+	//   "parameterOrder": [
+	//     "name"
+	//   ],
+	//   "parameters": {
+	//     "name": {
+	//       "description": "Required. The name of the printer to retrieve. Format: customers/{customer_id}/chrome/printers/{printer_id}",
+	//       "location": "path",
+	//       "pattern": "^customers/[^/]+/chrome/printers/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/{+name}",
+	//   "response": {
+	//     "$ref": "Printer"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.chrome.printers",
+	//     "https://www.googleapis.com/auth/admin.chrome.printers.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "admin.customers.chrome.printers.list":
+
+type CustomersChromePrintersListCall struct {
+	s            *Service
+	parent       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Lists printer configs.
+//
+// - parent: The name of the customer who owns this collection of
+//   printers. Format: customers/{customer_id}.
+func (r *CustomersChromePrintersService) List(parent string) *CustomersChromePrintersListCall {
+	c := &CustomersChromePrintersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	return c
+}
+
+// Filter sets the optional parameter "filter": Search query. Search
+// syntax is shared between this API and the Admin console printers
+// pages.
+func (c *CustomersChromePrintersListCall) Filter(filter string) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// OrderBy sets the optional parameter "orderBy": The order to sort
+// results by. Must be one of display_name, description, make_and_model,
+// or create_time. Default order is ascending, but descending order can
+// be returned by appending "desc" to the order_by field. For instance,
+// "description desc" will return the printers sorted by description in
+// descending order.
+func (c *CustomersChromePrintersListCall) OrderBy(orderBy string) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("orderBy", orderBy)
+	return c
+}
+
+// OrgUnitId sets the optional parameter "orgUnitId": The organizational
+// unit (OU) to list printers for. When org_unit is not present in the
+// request, all printers of the customer are returned (or filtered).
+// When org_unit is present, only printers available to that OU (owned
+// or inherited) are returned. Check Printer.org_unit_id to see whether
+// a printer is owned by or inherited for the OU.
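+//
+// Editor's sketch: restricting the listing to the printers visible to one
+// OU (hypothetical IDs; `srv` is an assumed authenticated *Service):
+//
+//	resp, err := srv.Customers.Chrome.Printers.List("customers/C123").
+//		OrgUnitId("ou-42").
+//		Do()
+//	_, _ = resp, err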
+func (c *CustomersChromePrintersListCall) OrgUnitId(orgUnitId string) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("orgUnitId", orgUnitId)
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of objects to return. The service may return fewer than this value.
+func (c *CustomersChromePrintersListCall) PageSize(pageSize int64) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token,
+// received from a previous call.
+func (c *CustomersChromePrintersListCall) PageToken(pageToken string) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *CustomersChromePrintersListCall) Fields(s ...googleapi.Field) *CustomersChromePrintersListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *CustomersChromePrintersListCall) IfNoneMatch(entityTag string) *CustomersChromePrintersListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CustomersChromePrintersListCall) Context(ctx context.Context) *CustomersChromePrintersListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *CustomersChromePrintersListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *CustomersChromePrintersListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printers")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "admin.customers.chrome.printers.list" call.
+// Exactly one of *ListPrintersResponse or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ListPrintersResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *CustomersChromePrintersListCall) Do(opts ...googleapi.CallOption) (*ListPrintersResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &ListPrintersResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Lists printer configs.",
+	//   "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers",
+	//   "httpMethod": "GET",
+	//   "id": "admin.customers.chrome.printers.list",
+	//   "parameterOrder": [
+	//     "parent"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Search query. Search syntax is shared between this API and the Admin console printers pages.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "orderBy": {
+	//       "description": "The order to sort results by. Must be one of display_name, description, make_and_model, or create_time. Default order is ascending, but descending order can be returned by appending \"desc\" to the order_by field. For instance, \"description desc\" will return the printers sorted by description in descending order.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "orgUnitId": {
+	//       "description": "The organizational unit (OU) to list printers for. When org_unit is not present in the request, all printers of the customer are returned (or filtered). When org_unit is present, only printers available to that OU (owned or inherited) are returned. Check Printer.org_unit_id to see whether a printer is owned by or inherited for the OU.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "pageSize": {
+	//       "description": "The maximum number of objects to return. The service may return fewer than this value.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "A page token, received from a previous call.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "parent": {
+	//       "description": "Required. The name of the customer who owns this collection of printers. Format: customers/{customer_id}",
+	//       "location": "path",
+	//       "pattern": "^customers/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/{+parent}/chrome/printers",
+	//   "response": {
+	//     "$ref": "ListPrintersResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.chrome.printers",
+	//     "https://www.googleapis.com/auth/admin.chrome.printers.readonly"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
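+//
+// Editor's sketch of the intended pagination pattern. `srv` is an assumed
+// authenticated *Service, and the Printers/DisplayName fields are assumed
+// from the ListPrintersResponse and Printer schemas.
+//
+//	err := srv.Customers.Chrome.Printers.List("customers/C123").
+//		PageSize(100).
+//		Pages(context.Background(), func(page *ListPrintersResponse) error {
+//			for _, p := range page.Printers {
+//				fmt.Println(p.DisplayName)
+//			}
+//			return nil // a non-nil error here stops the iteration
+//		})
+//	_ = err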
+func (c *CustomersChromePrintersListCall) Pages(ctx context.Context, f func(*ListPrintersResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "admin.customers.chrome.printers.listPrinterModels":
+
+type CustomersChromePrintersListPrinterModelsCall struct {
+	s            *Service
+	parent       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// ListPrinterModels: Lists the supported printer models.
+//
+// - parent: The name of the customer who owns this collection of
+//   printers. Format: customers/{customer_id}.
+func (r *CustomersChromePrintersService) ListPrinterModels(parent string) *CustomersChromePrintersListPrinterModelsCall {
+	c := &CustomersChromePrintersListPrinterModelsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.parent = parent
+	return c
+}
+
+// Filter sets the optional parameter "filter": Filter to list only
+// models by a given manufacturer in format: "manufacturer:Brother".
+// Search syntax is shared between this API and the Admin console
+// printers pages.
+func (c *CustomersChromePrintersListPrinterModelsCall) Filter(filter string) *CustomersChromePrintersListPrinterModelsCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": The maximum number
+// of objects to return. The service may return fewer than this value.
+func (c *CustomersChromePrintersListPrinterModelsCall) PageSize(pageSize int64) *CustomersChromePrintersListPrinterModelsCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A page token,
+// received from a previous call.
+func (c *CustomersChromePrintersListPrinterModelsCall) PageToken(pageToken string) *CustomersChromePrintersListPrinterModelsCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *CustomersChromePrintersListPrinterModelsCall) Fields(s ...googleapi.Field) *CustomersChromePrintersListPrinterModelsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *CustomersChromePrintersListPrinterModelsCall) IfNoneMatch(entityTag string) *CustomersChromePrintersListPrinterModelsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CustomersChromePrintersListPrinterModelsCall) Context(ctx context.Context) *CustomersChromePrintersListPrinterModelsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
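+//
+// A complete call using the options above, as an editor's sketch: listing
+// models for one manufacturer with the filter syntax documented on Filter.
+// `srv` is an assumed authenticated *Service.
+//
+//	resp, err := srv.Customers.Chrome.Printers.ListPrinterModels("customers/C123").
+//		Filter("manufacturer:Brother").
+//		Do()
+//	_, _ = resp, err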
+func (c *CustomersChromePrintersListPrinterModelsCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *CustomersChromePrintersListPrinterModelsCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+parent}/chrome/printers:listPrinterModels")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "admin.customers.chrome.printers.listPrinterModels" call.
+// Exactly one of *ListPrinterModelsResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *ListPrinterModelsResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *CustomersChromePrintersListPrinterModelsCall) Do(opts ...googleapi.CallOption) (*ListPrinterModelsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &ListPrinterModelsResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Lists the supported printer models.",
+	//   "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers:listPrinterModels",
+	//   "httpMethod": "GET",
+	//   "id": "admin.customers.chrome.printers.listPrinterModels",
+	//   "parameterOrder": [
+	//     "parent"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Filter to list only models by a given manufacturer in format: \"manufacturer:Brother\". Search syntax is shared between this API and the Admin console printers pages.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "pageSize": {
+	//       "description": "The maximum number of objects to return. The service may return fewer than this value.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "A page token, received from a previous call.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "parent": {
+	//       "description": "Required. The name of the customer who owns this collection of printers.
Format: customers/{customer_id}",
+	//       "location": "path",
+	//       "pattern": "^customers/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/{+parent}/chrome/printers:listPrinterModels",
+	//   "response": {
+	//     "$ref": "ListPrinterModelsResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.chrome.printers",
+	//     "https://www.googleapis.com/auth/admin.chrome.printers.readonly"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *CustomersChromePrintersListPrinterModelsCall) Pages(ctx context.Context, f func(*ListPrinterModelsResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "admin.customers.chrome.printers.patch":
+
+type CustomersChromePrintersPatchCall struct {
+	s          *Service
+	name       string
+	printer    *Printer
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Patch: Updates a `Printer` resource.
+//
+// - name: The resource name of the Printer object, in the format
+//   customers/{customer-id}/printers/{printer-id} (During printer
+//   creation leave empty).
+func (r *CustomersChromePrintersService) Patch(name string, printer *Printer) *CustomersChromePrintersPatchCall {
+	c := &CustomersChromePrintersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.name = name
+	c.printer = printer
+	return c
+}
+
+// ClearMask sets the optional parameter "clearMask": The list of fields
+// to be cleared. Note, some of the fields are read-only and cannot be
+// updated. Values for unspecified fields will be patched.
+func (c *CustomersChromePrintersPatchCall) ClearMask(clearMask string) *CustomersChromePrintersPatchCall {
+	c.urlParams_.Set("clearMask", clearMask)
+	return c
+}
+
+// UpdateMask sets the optional parameter "updateMask": The list of
+// fields to be updated. Note, some of the fields are read-only and
+// cannot be updated. Values for unspecified fields will be patched.
+func (c *CustomersChromePrintersPatchCall) UpdateMask(updateMask string) *CustomersChromePrintersPatchCall {
+	c.urlParams_.Set("updateMask", updateMask)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *CustomersChromePrintersPatchCall) Fields(s ...googleapi.Field) *CustomersChromePrintersPatchCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *CustomersChromePrintersPatchCall) Context(ctx context.Context) *CustomersChromePrintersPatchCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
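+//
+// A complete patch call using the options above, as an editor's sketch:
+// updating a single field via UpdateMask. IDs are hypothetical and `srv`
+// is an assumed authenticated *Service.
+//
+//	p, err := srv.Customers.Chrome.Printers.Patch(
+//		"customers/C123/chrome/printers/p1",
+//		&Printer{DisplayName: "renamed"},
+//	).UpdateMask("displayName").Do()
+//	_, _ = p, err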
+func (c *CustomersChromePrintersPatchCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *CustomersChromePrintersPatchCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.printer)
+	if err != nil {
+		return nil, err
+	}
+	reqHeaders.Set("Content-Type", "application/json")
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/{+name}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("PATCH", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"name": c.name,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "admin.customers.chrome.printers.patch" call.
+// Exactly one of *Printer or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Printer.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *CustomersChromePrintersPatchCall) Do(opts ...googleapi.CallOption) (*Printer, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Printer{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Updates a `Printer` resource.",
+	//   "flatPath": "admin/directory/v1/customers/{customersId}/chrome/printers/{printersId}",
+	//   "httpMethod": "PATCH",
+	//   "id": "admin.customers.chrome.printers.patch",
+	//   "parameterOrder": [
+	//     "name"
+	//   ],
+	//   "parameters": {
+	//     "clearMask": {
+	//       "description": "The list of fields to be cleared. Note, some of the fields are read-only and cannot be updated. Values for unspecified fields will be patched.",
+	//       "format": "google-fieldmask",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "name": {
+	//       "description": "The resource name of the Printer object, in the format customers/{customer-id}/printers/{printer-id} (During printer creation leave empty)",
+	//       "location": "path",
+	//       "pattern": "^customers/[^/]+/chrome/printers/[^/]+$",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "updateMask": {
+	//       "description": "The list of fields to be updated. Note, some of the fields are read-only and cannot be updated.
Values for unspecified fields will be patched.",
+	//       "format": "google-fieldmask",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/{+name}",
+	//   "request": {
+	//     "$ref": "Printer"
+	//   },
+	//   "response": {
+	//     "$ref": "Printer"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.chrome.printers"
+	//   ]
+	// }
+
+}
+
+// method id "directory.domainAliases.delete":
+
+type DomainAliasesDeleteCall struct {
+	s               *Service
+	customer        string
+	domainAliasName string
+	urlParams_      gensupport.URLParams
+	ctx_            context.Context
+	header_         http.Header
+}
+
+// Delete: Deletes a domain alias of the customer.
+//
+// - customer: Immutable ID of the Google Workspace account.
+// - domainAliasName: Name of domain alias to be deleted.
+func (r *DomainAliasesService) Delete(customer string, domainAliasName string) *DomainAliasesDeleteCall {
+	c := &DomainAliasesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.domainAliasName = domainAliasName
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DomainAliasesDeleteCall) Fields(s ...googleapi.Field) *DomainAliasesDeleteCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DomainAliasesDeleteCall) Context(ctx context.Context) *DomainAliasesDeleteCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DomainAliasesDeleteCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *DomainAliasesDeleteCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("DELETE", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer":        c.customer,
+		"domainAliasName": c.domainAliasName,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.domainAliases.delete" call.
+func (c *DomainAliasesDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Deletes a domain alias of the customer.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}",
+	//   "httpMethod": "DELETE",
+	//   "id": "directory.domainAliases.delete",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "domainAliasName"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "domainAliasName": {
+	//       "description": "Name of domain alias to be deleted.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.domain"
+	//   ]
+	// }
+
+}
+
+// method id "directory.domainAliases.get":
+
+type DomainAliasesGetCall struct {
+	s               *Service
+	customer        string
+	domainAliasName string
+	urlParams_      gensupport.URLParams
+	ifNoneMatch_    string
+	ctx_            context.Context
+	header_         http.Header
+}
+
+// Get: Retrieves a domain alias of the customer.
+//
+// - customer: The unique ID for the customer's Google Workspace
+//   account. In case of a multi-domain account, to fetch all groups for
+//   a customer, use this field instead of `domain`. You can also use
+//   the `my_customer` alias to represent your account's `customerId`.
+//   The `customerId` is also returned as part of the Users
+//   (/admin-sdk/directory/v1/reference/users) resource. You must
+//   provide either the `customer` or the `domain` parameter.
+// - domainAliasName: Name of domain alias to be retrieved.
+func (r *DomainAliasesService) Get(customer string, domainAliasName string) *DomainAliasesGetCall {
+	c := &DomainAliasesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.domainAliasName = domainAliasName
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DomainAliasesGetCall) Fields(s ...googleapi.Field) *DomainAliasesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DomainAliasesGetCall) IfNoneMatch(entityTag string) *DomainAliasesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DomainAliasesGetCall) Context(ctx context.Context) *DomainAliasesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
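+//
+// Editor's sketch of the conditional-fetch pattern described under
+// IfNoneMatch above. `srv` is an assumed authenticated *Service, and
+// previousEtag stands in for the Etag field of an earlier response.
+//
+//	alias, err := srv.DomainAliases.Get("my_customer", "alias.example.com").
+//		IfNoneMatch(previousEtag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached copy is still current
+//	}
+//	_ = alias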
+func (c *DomainAliasesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainAliasesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "domainAliasName": c.domainAliasName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domainAliases.get" call. +// Exactly one of *DomainAlias or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DomainAlias.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DomainAliasesGetCall) Do(opts ...googleapi.CallOption) (*DomainAlias, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DomainAlias{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a domain alias of the customer.", + // "flatPath": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + // "httpMethod": "GET", + // "id": "directory.domainAliases.get", + // "parameterOrder": [ + // "customer", + // "domainAliasName" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "domainAliasName": { + // "description": "Name of domain alias to be retrieved.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/domainaliases/{domainAliasName}", + // "response": { + // "$ref": "DomainAlias" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.domain", + // "https://www.googleapis.com/auth/admin.directory.domain.readonly" + // ] + // } + +} + +// method id "directory.domainAliases.insert": + +type DomainAliasesInsertCall struct { + s *Service + customer string + domainalias *DomainAlias + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Inserts a domain alias of the customer. +// +// - customer: Immutable ID of the Google Workspace account. +func (r *DomainAliasesService) Insert(customer string, domainalias *DomainAlias) *DomainAliasesInsertCall { + c := &DomainAliasesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.domainalias = domainalias + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DomainAliasesInsertCall) Fields(s ...googleapi.Field) *DomainAliasesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DomainAliasesInsertCall) Context(ctx context.Context) *DomainAliasesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DomainAliasesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainAliasesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.domainalias) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domainaliases") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domainAliases.insert" call. +// Exactly one of *DomainAlias or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DomainAlias.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
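+
+// Editorial usage sketch (not generated code): inserting a new alias with
+// the builder above. svc and ctx are assumed as before; the field values
+// are placeholders (DomainAliasName and ParentDomainName are the request
+// fields that matter here).
+//
+//    created, err := svc.DomainAliases.Insert("my_customer", &DomainAlias{
+//        DomainAliasName:  "alias.example.com",
+//        ParentDomainName: "example.com",
+//    }).Context(ctx).Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println("verified:", created.Verified)
+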
+func (c *DomainAliasesInsertCall) Do(opts ...googleapi.CallOption) (*DomainAlias, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &DomainAlias{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Inserts a domain alias of the customer.",
+ // "flatPath": "admin/directory/v1/customer/{customer}/domainaliases",
+ // "httpMethod": "POST",
+ // "id": "directory.domainAliases.insert",
+ // "parameterOrder": [
+ // "customer"
+ // ],
+ // "parameters": {
+ // "customer": {
+ // "description": "Immutable ID of the Google Workspace account.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "admin/directory/v1/customer/{customer}/domainaliases",
+ // "request": {
+ // "$ref": "DomainAlias"
+ // },
+ // "response": {
+ // "$ref": "DomainAlias"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/admin.directory.domain"
+ // ]
+ // }
+
+}
+
+// method id "directory.domainAliases.list":
+
+type DomainAliasesListCall struct {
+ s *Service
+ customer string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// List: Lists the domain aliases of the customer.
+//
+// - customer: The unique ID for the customer's Google Workspace
+// account. In case of a multi-domain account, to fetch all groups for
+// a customer, use this field instead of `domain`. You can also use
+// the `my_customer` alias to represent your account's `customerId`.
+// The `customerId` is also returned as part of the Users
+// (/admin-sdk/directory/v1/reference/users) resource. You must
+// provide either the `customer` or the `domain` parameter.
+func (r *DomainAliasesService) List(customer string) *DomainAliasesListCall {
+ c := &DomainAliasesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.customer = customer
+ return c
+}
+
+// ParentDomainName sets the optional parameter "parentDomainName": Name
+// of the parent domain for which domain aliases are to be fetched.
+func (c *DomainAliasesListCall) ParentDomainName(parentDomainName string) *DomainAliasesListCall {
+ c.urlParams_.Set("parentDomainName", parentDomainName)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DomainAliasesListCall) Fields(s ...googleapi.Field) *DomainAliasesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
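+
+// Editorial usage sketch (not generated code): listing the aliases under
+// one parent domain and trimming the response with Fields. svc and ctx are
+// assumed as before; the field mask string follows the partial-response
+// syntax linked above.
+//
+//    aliases, err := svc.DomainAliases.List("my_customer").
+//        ParentDomainName("example.com").
+//        Fields("domainAliases(domainAliasName,verified)").
+//        Context(ctx).
+//        Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, a := range aliases.DomainAliases {
+//        fmt.Println(a.DomainAliasName, a.Verified)
+//    }
+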
+func (c *DomainAliasesListCall) IfNoneMatch(entityTag string) *DomainAliasesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DomainAliasesListCall) Context(ctx context.Context) *DomainAliasesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DomainAliasesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainAliasesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domainaliases") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domainAliases.list" call. +// Exactly one of *DomainAliases or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DomainAliases.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DomainAliasesListCall) Do(opts ...googleapi.CallOption) (*DomainAliases, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &DomainAliases{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the domain aliases of the customer.", + // "flatPath": "admin/directory/v1/customer/{customer}/domainaliases", + // "httpMethod": "GET", + // "id": "directory.domainAliases.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "parentDomainName": { + // "description": "Name of the parent domain for which domain aliases are to be fetched.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/domainaliases", + // "response": { + // "$ref": "DomainAliases" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.domain", + // "https://www.googleapis.com/auth/admin.directory.domain.readonly" + // ] + // } + +} + +// method id "directory.domains.delete": + +type DomainsDeleteCall struct { + s *Service + customer string + domainName string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a domain of the customer. +// +// - customer: Immutable ID of the Google Workspace account. +// - domainName: Name of domain to be deleted. +func (r *DomainsService) Delete(customer string, domainName string) *DomainsDeleteCall { + c := &DomainsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.domainName = domainName + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DomainsDeleteCall) Fields(s ...googleapi.Field) *DomainsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DomainsDeleteCall) Context(ctx context.Context) *DomainsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DomainsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domains/{domainName}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "domainName": c.domainName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domains.delete" call. +func (c *DomainsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return gensupport.WrapError(err)
+ }
+ return nil
+ // {
+ // "description": "Deletes a domain of the customer.",
+ // "flatPath": "admin/directory/v1/customer/{customer}/domains/{domainName}",
+ // "httpMethod": "DELETE",
+ // "id": "directory.domains.delete",
+ // "parameterOrder": [
+ // "customer",
+ // "domainName"
+ // ],
+ // "parameters": {
+ // "customer": {
+ // "description": "Immutable ID of the Google Workspace account.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "domainName": {
+ // "description": "Name of domain to be deleted",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "admin/directory/v1/customer/{customer}/domains/{domainName}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/admin.directory.domain"
+ // ]
+ // }
+
+}
+
+// method id "directory.domains.get":
+
+type DomainsGetCall struct {
+ s *Service
+ customer string
+ domainName string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Retrieves a domain of the customer.
+//
+// - customer: The unique ID for the customer's Google Workspace
+// account. In case of a multi-domain account, to fetch all groups for
+// a customer, use this field instead of `domain`. You can also use
+// the `my_customer` alias to represent your account's `customerId`.
+// The `customerId` is also returned as part of the Users
+// (/admin-sdk/directory/v1/reference/users) resource. You must
+// provide either the `customer` or the `domain` parameter.
+// - domainName: Name of domain to be retrieved.
+func (r *DomainsService) Get(customer string, domainName string) *DomainsGetCall {
+ c := &DomainsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.customer = customer
+ c.domainName = domainName
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DomainsGetCall) Fields(s ...googleapi.Field) *DomainsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DomainsGetCall) IfNoneMatch(entityTag string) *DomainsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DomainsGetCall) Context(ctx context.Context) *DomainsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
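+
+// Editorial usage sketch (not generated code): Delete-style calls have no
+// response body, so Do returns only an error. svc and ctx are assumed as
+// before; "example.com" is a placeholder domain name.
+//
+//    if err := svc.Domains.Delete("my_customer", "example.com").
+//        Context(ctx).
+//        Do(); err != nil {
+//        log.Fatal(err)
+//    }
+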
+func (c *DomainsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domains/{domainName}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "domainName": c.domainName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domains.get" call. +// Exactly one of *Domains or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Domains.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DomainsGetCall) Do(opts ...googleapi.CallOption) (*Domains, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Domains{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a domain of the customer.", + // "flatPath": "admin/directory/v1/customer/{customer}/domains/{domainName}", + // "httpMethod": "GET", + // "id": "directory.domains.get", + // "parameterOrder": [ + // "customer", + // "domainName" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "domainName": { + // "description": "Name of domain to be retrieved", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/domains/{domainName}", + // "response": { + // "$ref": "Domains" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.domain", + // "https://www.googleapis.com/auth/admin.directory.domain.readonly" + // ] + // } + +} + +// method id "directory.domains.insert": + +type DomainsInsertCall struct { + s *Service + customer string + domains *Domains + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Inserts a domain of the customer. +// +// - customer: Immutable ID of the Google Workspace account. +func (r *DomainsService) Insert(customer string, domains *Domains) *DomainsInsertCall { + c := &DomainsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.domains = domains + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DomainsInsertCall) Fields(s ...googleapi.Field) *DomainsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DomainsInsertCall) Context(ctx context.Context) *DomainsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DomainsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.domains) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domains") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domains.insert" call. +// Exactly one of *Domains or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Domains.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DomainsInsertCall) Do(opts ...googleapi.CallOption) (*Domains, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Domains{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Inserts a domain of the customer.",
+ // "flatPath": "admin/directory/v1/customer/{customer}/domains",
+ // "httpMethod": "POST",
+ // "id": "directory.domains.insert",
+ // "parameterOrder": [
+ // "customer"
+ // ],
+ // "parameters": {
+ // "customer": {
+ // "description": "Immutable ID of the Google Workspace account.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "admin/directory/v1/customer/{customer}/domains",
+ // "request": {
+ // "$ref": "Domains"
+ // },
+ // "response": {
+ // "$ref": "Domains"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/admin.directory.domain"
+ // ]
+ // }
+
+}
+
+// method id "directory.domains.list":
+
+type DomainsListCall struct {
+ s *Service
+ customer string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// List: Lists the domains of the customer.
+//
+// - customer: The unique ID for the customer's Google Workspace
+// account. In case of a multi-domain account, to fetch all groups for
+// a customer, use this field instead of `domain`. You can also use
+// the `my_customer` alias to represent your account's `customerId`.
+// The `customerId` is also returned as part of the Users
+// (/admin-sdk/directory/v1/reference/users) resource. You must
+// provide either the `customer` or the `domain` parameter.
+func (r *DomainsService) List(customer string) *DomainsListCall {
+ c := &DomainsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.customer = customer
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DomainsListCall) Fields(s ...googleapi.Field) *DomainsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DomainsListCall) IfNoneMatch(entityTag string) *DomainsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DomainsListCall) Context(ctx context.Context) *DomainsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
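+
+// Editorial usage sketch (not generated code): inserting a domain and then
+// listing all domains. Note the naming quirk carried over from the
+// discovery document: a single domain resource is *Domains, while the list
+// response below is *Domains2. svc and ctx are assumed as before; the
+// domain names are placeholders.
+//
+//    if _, err := svc.Domains.Insert("my_customer", &Domains{
+//        DomainName: "example.org",
+//    }).Context(ctx).Do(); err != nil {
+//        log.Fatal(err)
+//    }
+//    all, err := svc.Domains.List("my_customer").Context(ctx).Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, d := range all.Domains {
+//        fmt.Println(d.DomainName, d.Verified)
+//    }
+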
+func (c *DomainsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DomainsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/domains") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.domains.list" call. +// Exactly one of *Domains2 or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Domains2.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DomainsListCall) Do(opts ...googleapi.CallOption) (*Domains2, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Domains2{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the domains of the customer.", + // "flatPath": "admin/directory/v1/customer/{customer}/domains", + // "httpMethod": "GET", + // "id": "directory.domains.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/domains", + // "response": { + // "$ref": "Domains2" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.domain", + // "https://www.googleapis.com/auth/admin.directory.domain.readonly" + // ] + // } + +} + +// method id "directory.groups.delete": + +type GroupsDeleteCall struct { + s *Service + groupKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a group. 
+// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *GroupsService) Delete(groupKey string) *GroupsDeleteCall { + c := &GroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsDeleteCall) Fields(s ...googleapi.Field) *GroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsDeleteCall) Context(ctx context.Context) *GroupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.delete" call. +func (c *GroupsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes a group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}", + // "httpMethod": "DELETE", + // "id": "directory.groups.delete", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.get": + +type GroupsGetCall struct { + s *Service + groupKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a group's properties. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. 
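+
+// Editorial usage sketch (not generated code): fetching a group by any of
+// the accepted keys; an email address is used here. svc and ctx are
+// assumed as before; the address is a placeholder.
+//
+//    g, err := svc.Groups.Get("eng@example.com").Context(ctx).Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(g.Name, g.DirectMembersCount)
+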
+func (r *GroupsService) Get(groupKey string) *GroupsGetCall {
+ c := &GroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.groupKey = groupKey
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GroupsGetCall) Fields(s ...googleapi.Field) *GroupsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GroupsGetCall) IfNoneMatch(entityTag string) *GroupsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GroupsGetCall) Context(ctx context.Context) *GroupsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *GroupsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *GroupsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "groupKey": c.groupKey,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.groups.get" call.
+// Exactly one of *Group or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Group.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *GroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Group{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a group's properties.", + // "flatPath": "admin/directory/v1/groups/{groupKey}", + // "httpMethod": "GET", + // "id": "directory.groups.get", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}", + // "response": { + // "$ref": "Group" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// method id "directory.groups.insert": + +type GroupsInsertCall struct { + s *Service + group *Group + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a group. +func (r *GroupsService) Insert(group *Group) *GroupsInsertCall { + c := &GroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.group = group + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsInsertCall) Fields(s ...googleapi.Field) *GroupsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsInsertCall) Context(ctx context.Context) *GroupsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.insert" call. 
+// Exactly one of *Group or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Group.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsInsertCall) Do(opts ...googleapi.CallOption) (*Group, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Group{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a group.", + // "flatPath": "admin/directory/v1/groups", + // "httpMethod": "POST", + // "id": "directory.groups.insert", + // "parameterOrder": [], + // "parameters": {}, + // "path": "admin/directory/v1/groups", + // "request": { + // "$ref": "Group" + // }, + // "response": { + // "$ref": "Group" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.list": + +type GroupsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves all groups of a domain or of a user given a userKey +// (paginated). +func (r *GroupsService) List() *GroupsListCall { + c := &GroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Customer sets the optional parameter "customer": The unique ID for +// the customer's Google Workspace account. In case of a multi-domain +// account, to fetch all groups for a customer, use this field instead +// of `domain`. You can also use the `my_customer` alias to represent +// your account's `customerId`. The `customerId` is also returned as +// part of the Users (/admin-sdk/directory/v1/reference/users) resource. +// You must provide either the `customer` or the `domain` parameter. +func (c *GroupsListCall) Customer(customer string) *GroupsListCall { + c.urlParams_.Set("customer", customer) + return c +} + +// Domain sets the optional parameter "domain": The domain name. Use +// this field to get groups from only one domain. To return all domains +// for a customer account, use the `customer` query parameter instead. +func (c *GroupsListCall) Domain(domain string) *GroupsListCall { + c.urlParams_.Set("domain", domain) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. Max allowed value is 200. +func (c *GroupsListCall) MaxResults(maxResults int64) *GroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Column to use for +// sorting results +// +// Possible values: +// +// "email" - Email of the group. 
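+
+// Editorial usage sketch (not generated code): listing every group for a
+// customer. The Pages helper defined after Do below drives the pageToken
+// loop. svc and ctx are assumed as before; "my_customer" is a placeholder.
+//
+//    err := svc.Groups.List().
+//        Customer("my_customer").
+//        MaxResults(200).
+//        Pages(ctx, func(page *Groups) error {
+//            for _, g := range page.Groups {
+//                fmt.Println(g.Email)
+//            }
+//            return nil
+//        })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+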
+func (c *GroupsListCall) OrderBy(orderBy string) *GroupsListCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Token to specify
+// the next page in the list.
+func (c *GroupsListCall) PageToken(pageToken string) *GroupsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Query sets the optional parameter "query": Query string search.
+// Complete documentation is at
+// https://developers.google.com/admin-sdk/directory/v1/guides/search-groups
+func (c *GroupsListCall) Query(query string) *GroupsListCall {
+ c.urlParams_.Set("query", query)
+ return c
+}
+
+// SortOrder sets the optional parameter "sortOrder": Whether to return
+// results in ascending or descending order. Only of use when orderBy is
+// also used.
+//
+// Possible values:
+//
+// "ASCENDING" - Ascending order.
+// "DESCENDING" - Descending order.
+func (c *GroupsListCall) SortOrder(sortOrder string) *GroupsListCall {
+ c.urlParams_.Set("sortOrder", sortOrder)
+ return c
+}
+
+// UserKey sets the optional parameter "userKey": Email or immutable ID
+// of the user. If set, only the groups of which the given user is a
+// member are listed. If it's an ID, it should match the ID of the user
+// object.
+func (c *GroupsListCall) UserKey(userKey string) *GroupsListCall {
+ c.urlParams_.Set("userKey", userKey)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GroupsListCall) Fields(s ...googleapi.Field) *GroupsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GroupsListCall) IfNoneMatch(entityTag string) *GroupsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GroupsListCall) Context(ctx context.Context) *GroupsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *GroupsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *GroupsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.groups.list" call.
+// Exactly one of *Groups or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Groups.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsListCall) Do(opts ...googleapi.CallOption) (*Groups, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Groups{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves all groups of a domain or of a user given a userKey (paginated).", + // "flatPath": "admin/directory/v1/groups", + // "httpMethod": "GET", + // "id": "directory.groups.list", + // "parameterOrder": [], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + // "location": "query", + // "type": "string" + // }, + // "domain": { + // "description": "The domain name. Use this field to get groups from only one domain. To return all domains for a customer account, use the `customer` query parameter instead.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "200", + // "description": "Maximum number of results to return. Max allowed value is 200.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Column to use for sorting results", + // "enum": [ + // "email" + // ], + // "enumDescriptions": [ + // "Email of the group." + // ], + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Token to specify next page in the list", + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "Query string search. Should be of the form \"\". Complete documentation is at https: //developers.google.com/admin-sdk/directory/v1/guides/search-groups", + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Whether to return results in ascending or descending order. Only of use when orderBy is also used", + // "enum": [ + // "ASCENDING", + // "DESCENDING" + // ], + // "enumDescriptions": [ + // "Ascending order.", + // "Descending order." + // ], + // "location": "query", + // "type": "string" + // }, + // "userKey": { + // "description": "Email or immutable ID of the user if only those groups are to be listed, the given user is a member of. 
If it's an ID, it should match with the ID of the user object.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups", + // "response": { + // "$ref": "Groups" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GroupsListCall) Pages(ctx context.Context, f func(*Groups) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.groups.patch": + +type GroupsPatchCall struct { + s *Service + groupKey string + group *Group + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a group's properties. This method supports patch +// semantics (/admin-sdk/directory/v1/guides/performance#patch). +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *GroupsService) Patch(groupKey string, group *Group) *GroupsPatchCall { + c := &GroupsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.group = group + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsPatchCall) Fields(s ...googleapi.Field) *GroupsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsPatchCall) Context(ctx context.Context) *GroupsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.patch" call. +// Exactly one of *Group or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Group.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsPatchCall) Do(opts ...googleapi.CallOption) (*Group, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Group{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a group's properties. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + // "flatPath": "admin/directory/v1/groups/{groupKey}", + // "httpMethod": "PATCH", + // "id": "directory.groups.patch", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}", + // "request": { + // "$ref": "Group" + // }, + // "response": { + // "$ref": "Group" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.update": + +type GroupsUpdateCall struct { + s *Service + groupKey string + group *Group + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a group's properties. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *GroupsService) Update(groupKey string, group *Group) *GroupsUpdateCall { + c := &GroupsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.group = group + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsUpdateCall) Fields(s ...googleapi.Field) *GroupsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsUpdateCall) Context(ctx context.Context) *GroupsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
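+
+// Editorial usage sketch (not generated code): with Patch only the fields
+// set on the request body change, whereas the Update call in this section
+// issues a PUT that replaces the whole resource. svc and ctx are assumed
+// as before; the group key and description are placeholders.
+//
+//    g, err := svc.Groups.Patch("eng@example.com", &Group{
+//        Description: "Engineering discussion list",
+//    }).Context(ctx).Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(g.Email, g.Description)
+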
+func (c *GroupsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.update" call. +// Exactly one of *Group or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Group.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Group{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a group's properties.", + // "flatPath": "admin/directory/v1/groups/{groupKey}", + // "httpMethod": "PUT", + // "id": "directory.groups.update", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}", + // "request": { + // "$ref": "Group" + // }, + // "response": { + // "$ref": "Group" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.aliases.delete": + +type GroupsAliasesDeleteCall struct { + s *Service + groupKey string + alias string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes an alias. +// +// - alias: The alias to be removed. +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. 
+func (r *GroupsAliasesService) Delete(groupKey string, alias string) *GroupsAliasesDeleteCall { + c := &GroupsAliasesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.alias = alias + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsAliasesDeleteCall) Fields(s ...googleapi.Field) *GroupsAliasesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsAliasesDeleteCall) Context(ctx context.Context) *GroupsAliasesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsAliasesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsAliasesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/aliases/{alias}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "alias": c.alias, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.aliases.delete" call. +func (c *GroupsAliasesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes an alias.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/aliases/{alias}", + // "httpMethod": "DELETE", + // "id": "directory.groups.aliases.delete", + // "parameterOrder": [ + // "groupKey", + // "alias" + // ], + // "parameters": { + // "alias": { + // "description": "The alias to be removed", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/aliases/{alias}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.aliases.insert": + +type GroupsAliasesInsertCall struct { + s *Service + groupKey string + alias *Alias + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Adds an alias for the group. +// +// - groupKey: Identifies the group in the API request. 
The value can be +// the group's email address, group alias, or the unique group ID. +func (r *GroupsAliasesService) Insert(groupKey string, alias *Alias) *GroupsAliasesInsertCall { + c := &GroupsAliasesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.alias = alias + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsAliasesInsertCall) Fields(s ...googleapi.Field) *GroupsAliasesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsAliasesInsertCall) Context(ctx context.Context) *GroupsAliasesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsAliasesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsAliasesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.alias) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/aliases") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.aliases.insert" call. +// Exactly one of *Alias or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Alias.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsAliasesInsertCall) Do(opts ...googleapi.CallOption) (*Alias, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Alias{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an alias for the group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/aliases", + // "httpMethod": "POST", + // "id": "directory.groups.aliases.insert", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/aliases", + // "request": { + // "$ref": "Alias" + // }, + // "response": { + // "$ref": "Alias" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group" + // ] + // } + +} + +// method id "directory.groups.aliases.list": + +type GroupsAliasesListCall struct { + s *Service + groupKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all aliases for a group. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *GroupsAliasesService) List(groupKey string) *GroupsAliasesListCall { + c := &GroupsAliasesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsAliasesListCall) Fields(s ...googleapi.Field) *GroupsAliasesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *GroupsAliasesListCall) IfNoneMatch(entityTag string) *GroupsAliasesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsAliasesListCall) Context(ctx context.Context) *GroupsAliasesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request.
+func (c *GroupsAliasesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsAliasesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/aliases") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.groups.aliases.list" call. +// Exactly one of *Aliases or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Aliases.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GroupsAliasesListCall) Do(opts ...googleapi.CallOption) (*Aliases, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Aliases{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all aliases for a group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/aliases", + // "httpMethod": "GET", + // "id": "directory.groups.aliases.list", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/aliases", + // "response": { + // "$ref": "Aliases" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// method id "directory.members.delete": + +type MembersDeleteCall struct { + s *Service + groupKey string + memberKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes a member from a group. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +// - memberKey: Identifies the group member in the API request. 
A group +// member can be a user or another group. The value can be the +// member's (group or user) primary email address, alias, or unique +// ID. +func (r *MembersService) Delete(groupKey string, memberKey string) *MembersDeleteCall { + c := &MembersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.memberKey = memberKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersDeleteCall) Fields(s ...googleapi.Field) *MembersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MembersDeleteCall) Context(ctx context.Context) *MembersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members/{memberKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "memberKey": c.memberKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.delete" call. +func (c *MembersDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes a member from a group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "httpMethod": "DELETE", + // "id": "directory.members.delete", + // "parameterOrder": [ + // "groupKey", + // "memberKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "memberKey": { + // "description": "Identifies the group member in the API request. A group member can be a user or another group. 
The value can be the member's (group or user) primary email address, alias, or unique ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member" + // ] + // } + +} + +// method id "directory.members.get": + +type MembersGetCall struct { + s *Service + groupKey string + memberKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a group member's properties. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +// - memberKey: Identifies the group member in the API request. A group +// member can be a user or another group. The value can be the +// member's (group or user) primary email address, alias, or unique +// ID. +func (r *MembersService) Get(groupKey string, memberKey string) *MembersGetCall { + c := &MembersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.memberKey = memberKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersGetCall) Fields(s ...googleapi.Field) *MembersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MembersGetCall) IfNoneMatch(entityTag string) *MembersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MembersGetCall) Context(ctx context.Context) *MembersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members/{memberKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "memberKey": c.memberKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.get" call. +// Exactly one of *Member or error will be non-nil.
Any non-2xx status +// code is an error. Response headers are in either +// *Member.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MembersGetCall) Do(opts ...googleapi.CallOption) (*Member, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Member{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a group member's properties.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "httpMethod": "GET", + // "id": "directory.members.get", + // "parameterOrder": [ + // "groupKey", + // "memberKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "memberKey": { + // "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "response": { + // "$ref": "Member" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member", + // "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// method id "directory.members.hasMember": + +type MembersHasMemberCall struct { + s *Service + groupKey string + memberKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// HasMember: Checks whether the given user is a member of the group. +// Membership can be direct or nested, but if nested, the `memberKey` +// and `groupKey` must be entities in the same domain or an `Invalid +// input` error is returned. To check for nested memberships that +// include entities outside of the group's domain, use the +// `checkTransitiveMembership()` +// (https://cloud.google.com/identity/docs/reference/rest/v1/groups.memberships/checkTransitiveMembership) +// method in the Cloud Identity Groups API. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +// - memberKey: Identifies the user member in the API request. The value +// can be the user's primary email address, alias, or unique ID. 
+func (r *MembersService) HasMember(groupKey string, memberKey string) *MembersHasMemberCall { + c := &MembersHasMemberCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.memberKey = memberKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersHasMemberCall) Fields(s ...googleapi.Field) *MembersHasMemberCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MembersHasMemberCall) IfNoneMatch(entityTag string) *MembersHasMemberCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MembersHasMemberCall) Context(ctx context.Context) *MembersHasMemberCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersHasMemberCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersHasMemberCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/hasMember/{memberKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "memberKey": c.memberKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.hasMember" call. +// Exactly one of *MembersHasMember or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MembersHasMember.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MembersHasMemberCall) Do(opts ...googleapi.CallOption) (*MembersHasMember, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &MembersHasMember{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Checks whether the given user is a member of the group. Membership can be direct or nested, but if nested, the `memberKey` and `groupKey` must be entities in the same domain or an `Invalid input` error is returned. To check for nested memberships that include entities outside of the group's domain, use the [`checkTransitiveMembership()`](https://cloud.google.com/identity/docs/reference/rest/v1/groups.memberships/checkTransitiveMembership) method in the Cloud Identity Groups API.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/hasMember/{memberKey}", + // "httpMethod": "GET", + // "id": "directory.members.hasMember", + // "parameterOrder": [ + // "groupKey", + // "memberKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "memberKey": { + // "description": "Identifies the user member in the API request. The value can be the user's primary email address, alias, or unique ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/hasMember/{memberKey}", + // "response": { + // "$ref": "MembersHasMember" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member", + // "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// method id "directory.members.insert": + +type MembersInsertCall struct { + s *Service + groupKey string + member *Member + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Adds a user to the specified group. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *MembersService) Insert(groupKey string, member *Member) *MembersInsertCall { + c := &MembersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.member = member + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersInsertCall) Fields(s ...googleapi.Field) *MembersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *MembersInsertCall) Context(ctx context.Context) *MembersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.insert" call. +// Exactly one of *Member or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Member.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MembersInsertCall) Do(opts ...googleapi.CallOption) (*Member, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Member{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a user to the specified group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members", + // "httpMethod": "POST", + // "id": "directory.members.insert", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. 
The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members", + // "request": { + // "$ref": "Member" + // }, + // "response": { + // "$ref": "Member" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member" + // ] + // } + +} + +// method id "directory.members.list": + +type MembersListCall struct { + s *Service + groupKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of all members in a group. This +// method times out after 60 minutes. For more information, see +// Troubleshoot error codes +// (https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes). +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +func (r *MembersService) List(groupKey string) *MembersListCall { + c := &MembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + return c +} + +// IncludeDerivedMembership sets the optional parameter +// "includeDerivedMembership": Whether to list indirect memberships. +// Default: false. +func (c *MembersListCall) IncludeDerivedMembership(includeDerivedMembership bool) *MembersListCall { + c.urlParams_.Set("includeDerivedMembership", fmt.Sprint(includeDerivedMembership)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. Max allowed value is 200. +func (c *MembersListCall) MaxResults(maxResults int64) *MembersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// next page in the list. +func (c *MembersListCall) PageToken(pageToken string) *MembersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Roles sets the optional parameter "roles": The `roles` query +// parameter allows you to retrieve group members by role. Allowed +// values are `OWNER`, `MANAGER`, and `MEMBER`. +func (c *MembersListCall) Roles(roles string) *MembersListCall { + c.urlParams_.Set("roles", roles) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersListCall) Fields(s ...googleapi.Field) *MembersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MembersListCall) IfNoneMatch(entityTag string) *MembersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
+func (c *MembersListCall) Context(ctx context.Context) *MembersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.list" call. +// Exactly one of *Members or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Members.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MembersListCall) Do(opts ...googleapi.CallOption) (*Members, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Members{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of all members in a group. This method times out after 60 minutes. For more information, see [Troubleshoot error codes](https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes).", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members", + // "httpMethod": "GET", + // "id": "directory.members.list", + // "parameterOrder": [ + // "groupKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeDerivedMembership": { + // "description": "Whether to list indirect memberships. Default: false.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "200", + // "description": "Maximum number of results to return. 
Max allowed value is 200.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to specify next page in the list.", + // "location": "query", + // "type": "string" + // }, + // "roles": { + // "description": "The `roles` query parameter allows you to retrieve group members by role. Allowed values are `OWNER`, `MANAGER`, and `MEMBER`.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members", + // "response": { + // "$ref": "Members" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member", + // "https://www.googleapis.com/auth/admin.directory.group.member.readonly", + // "https://www.googleapis.com/auth/admin.directory.group.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MembersListCall) Pages(ctx context.Context, f func(*Members) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.members.patch": + +type MembersPatchCall struct { + s *Service + groupKey string + memberKey string + member *Member + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the membership properties of a user in the specified +// group. This method supports patch semantics +// (/admin-sdk/directory/v1/guides/performance#patch). +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +// - memberKey: Identifies the group member in the API request. A group +// member can be a user or another group. The value can be the +// member's (group or user) primary email address, alias, or unique +// ID. +func (r *MembersService) Patch(groupKey string, memberKey string, member *Member) *MembersPatchCall { + c := &MembersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.memberKey = memberKey + c.member = member + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersPatchCall) Fields(s ...googleapi.Field) *MembersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MembersPatchCall) Context(ctx context.Context) *MembersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *MembersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members/{memberKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "memberKey": c.memberKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.patch" call. +// Exactly one of *Member or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Member.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MembersPatchCall) Do(opts ...googleapi.CallOption) (*Member, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Member{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the membership properties of a user in the specified group. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "httpMethod": "PATCH", + // "id": "directory.members.patch", + // "parameterOrder": [ + // "groupKey", + // "memberKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "memberKey": { + // "description": "Identifies the group member in the API request. A group member can be a user or another group. 
The value can be the member's (group or user) primary email address, alias, or unique ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "request": { + // "$ref": "Member" + // }, + // "response": { + // "$ref": "Member" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member" + // ] + // } + +} + +// method id "directory.members.update": + +type MembersUpdateCall struct { + s *Service + groupKey string + memberKey string + member *Member + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the membership of a user in the specified group. +// +// - groupKey: Identifies the group in the API request. The value can be +// the group's email address, group alias, or the unique group ID. +// - memberKey: Identifies the group member in the API request. A group +// member can be a user or another group. The value can be the +// member's (group or user) primary email address, alias, or unique +// ID. +func (r *MembersService) Update(groupKey string, memberKey string, member *Member) *MembersUpdateCall { + c := &MembersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.groupKey = groupKey + c.memberKey = memberKey + c.member = member + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MembersUpdateCall) Fields(s ...googleapi.Field) *MembersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MembersUpdateCall) Context(ctx context.Context) *MembersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MembersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MembersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.member) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/groups/{groupKey}/members/{memberKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "groupKey": c.groupKey, + "memberKey": c.memberKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.members.update" call. +// Exactly one of *Member or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Member.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MembersUpdateCall) Do(opts ...googleapi.CallOption) (*Member, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Member{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the membership of a user in the specified group.", + // "flatPath": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "httpMethod": "PUT", + // "id": "directory.members.update", + // "parameterOrder": [ + // "groupKey", + // "memberKey" + // ], + // "parameters": { + // "groupKey": { + // "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "memberKey": { + // "description": "Identifies the group member in the API request. A group member can be a user or another group. The value can be the member's (group or user) primary email address, alias, or unique ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/groups/{groupKey}/members/{memberKey}", + // "request": { + // "$ref": "Member" + // }, + // "response": { + // "$ref": "Member" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.group", + // "https://www.googleapis.com/auth/admin.directory.group.member" + // ] + // } + +} + +// method id "directory.mobiledevices.action": + +type MobiledevicesActionCall struct { + s *Service + customerId string + resourceId string + mobiledeviceaction *MobileDeviceAction + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Action: Takes an action that affects a mobile device. For example, +// remotely wiping a device. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - resourceId: The unique ID the API service uses to identify the +// mobile device. +func (r *MobiledevicesService) Action(customerId string, resourceId string, mobiledeviceaction *MobileDeviceAction) *MobiledevicesActionCall { + c := &MobiledevicesActionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.resourceId = resourceId + c.mobiledeviceaction = mobiledeviceaction + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *MobiledevicesActionCall) Fields(s ...googleapi.Field) *MobiledevicesActionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MobiledevicesActionCall) Context(ctx context.Context) *MobiledevicesActionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MobiledevicesActionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MobiledevicesActionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.mobiledeviceaction) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}/action") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "resourceId": c.resourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.mobiledevices.action" call. +func (c *MobiledevicesActionCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Takes an action that affects a mobile device. For example, remotely wiping a device.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}/action", + // "httpMethod": "POST", + // "id": "directory.mobiledevices.action", + // "parameterOrder": [ + // "customerId", + // "resourceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "resourceId": { + // "description": "The unique ID the API service uses to identify the mobile device.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}/action", + // "request": { + // "$ref": "MobileDeviceAction" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.mobile", + // "https://www.googleapis.com/auth/admin.directory.device.mobile.action" + // ] + // } + +} + +// method id "directory.mobiledevices.delete": + +type MobiledevicesDeleteCall struct { + s *Service + customerId string + resourceId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes a mobile device. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - resourceId: The unique ID the API service uses to identify the +// mobile device. +func (r *MobiledevicesService) Delete(customerId string, resourceId string) *MobiledevicesDeleteCall { + c := &MobiledevicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.resourceId = resourceId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MobiledevicesDeleteCall) Fields(s ...googleapi.Field) *MobiledevicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MobiledevicesDeleteCall) Context(ctx context.Context) *MobiledevicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MobiledevicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MobiledevicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "resourceId": c.resourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.mobiledevices.delete" call. +func (c *MobiledevicesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes a mobile device.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + // "httpMethod": "DELETE", + // "id": "directory.mobiledevices.delete", + // "parameterOrder": [ + // "customerId", + // "resourceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "resourceId": { + // "description": "The unique ID the API service uses to identify the mobile device.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.mobile" + // ] + // } + +} + +// method id "directory.mobiledevices.get": + +type MobiledevicesGetCall struct { + s *Service + customerId string + resourceId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a mobile device's properties. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - resourceId: The unique ID the API service uses to identify the +// mobile device. +func (r *MobiledevicesService) Get(customerId string, resourceId string) *MobiledevicesGetCall { + c := &MobiledevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.resourceId = resourceId + return c +} + +// Projection sets the optional parameter "projection": Restrict +// information returned to a set of selected fields. +// +// Possible values: +// +// "BASIC" - Includes only the basic metadata fields (e.g., deviceId, +// +// model, status, type, and status) +// +// "FULL" - Includes all metadata fields +func (c *MobiledevicesGetCall) Projection(projection string) *MobiledevicesGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MobiledevicesGetCall) Fields(s ...googleapi.Field) *MobiledevicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MobiledevicesGetCall) IfNoneMatch(entityTag string) *MobiledevicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MobiledevicesGetCall) Context(ctx context.Context) *MobiledevicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MobiledevicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MobiledevicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "resourceId": c.resourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.mobiledevices.get" call. +// Exactly one of *MobileDevice or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MobileDevice.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MobiledevicesGetCall) Do(opts ...googleapi.CallOption) (*MobileDevice, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &MobileDevice{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a mobile device's properties.", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + // "httpMethod": "GET", + // "id": "directory.mobiledevices.get", + // "parameterOrder": [ + // "customerId", + // "resourceId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, model, status, type, and status)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // }, + // "resourceId": { + // "description": "The unique ID the API service uses to identify the mobile device.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/mobile/{resourceId}", + // "response": { + // "$ref": "MobileDevice" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.mobile", + // "https://www.googleapis.com/auth/admin.directory.device.mobile.action", + // "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly" + // ] + // } + +} + +// method id "directory.mobiledevices.list": + +type MobiledevicesListCall struct { + s *Service + customerId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of all user-owned mobile devices for +// an account. To retrieve a list that includes company-owned devices, +// use the Cloud Identity Devices API +// (https://cloud.google.com/identity/docs/concepts/overview-devices) +// instead. This method times out after 60 minutes. For more +// information, see Troubleshoot error codes +// (https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes). +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +func (r *MobiledevicesService) List(customerId string) *MobiledevicesListCall { + c := &MobiledevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. Max allowed value is 100. +func (c *MobiledevicesListCall) MaxResults(maxResults int64) *MobiledevicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Device property to use +// for sorting results. +// +// Possible values: +// +// "deviceId" - The serial number for a Google Sync mobile device. For +// +// Android devices, this is a software generated unique identifier. +// +// "email" - The device owner's email address. +// "lastSync" - Last policy settings sync date time of the device. +// "model" - The mobile device's model. +// "name" - The device owner's user name. +// "os" - The device's operating system. +// "status" - The device status. +// "type" - Type of the device. 
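+// +// For example, a sorted listing sketch (svc is an assumed, pre-built *Service; the parameter choices are illustrative): +// +// devices, err := svc.Mobiledevices.List("my_customer"). +// OrderBy("email"). +// SortOrder("ASCENDING"). +// MaxResults(100). +// Do()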
+func (c *MobiledevicesListCall) OrderBy(orderBy string) *MobiledevicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// next page in the list +func (c *MobiledevicesListCall) PageToken(pageToken string) *MobiledevicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Projection sets the optional parameter "projection": Restrict +// information returned to a set of selected fields. +// +// Possible values: +// +// "BASIC" - Includes only the basic metadata fields (e.g., deviceId, +// +// model, status, type, and status) +// +// "FULL" - Includes all metadata fields +func (c *MobiledevicesListCall) Projection(projection string) *MobiledevicesListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Query sets the optional parameter "query": Search string in the +// format given at +// https://developers.google.com/admin-sdk/directory/v1/search-operators +func (c *MobiledevicesListCall) Query(query string) *MobiledevicesListCall { + c.urlParams_.Set("query", query) + return c +} + +// SortOrder sets the optional parameter "sortOrder": Whether to return +// results in ascending or descending order. Must be used with the +// `orderBy` parameter. +// +// Possible values: +// +// "ASCENDING" - Ascending order. +// "DESCENDING" - Descending order. +func (c *MobiledevicesListCall) SortOrder(sortOrder string) *MobiledevicesListCall { + c.urlParams_.Set("sortOrder", sortOrder) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MobiledevicesListCall) Fields(s ...googleapi.Field) *MobiledevicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MobiledevicesListCall) IfNoneMatch(entityTag string) *MobiledevicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MobiledevicesListCall) Context(ctx context.Context) *MobiledevicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MobiledevicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MobiledevicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/devices/mobile") + urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.mobiledevices.list" call. +// Exactly one of *MobileDevices or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MobileDevices.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MobiledevicesListCall) Do(opts ...googleapi.CallOption) (*MobileDevices, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &MobileDevices{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of all user-owned mobile devices for an account. To retrieve a list that includes company-owned devices, use the Cloud Identity [Devices API](https://cloud.google.com/identity/docs/concepts/overview-devices) instead. This method times out after 60 minutes. For more information, see [Troubleshoot error codes](https://developers.google.com/admin-sdk/directory/v1/guides/troubleshoot-error-codes).", + // "flatPath": "admin/directory/v1/customer/{customerId}/devices/mobile", + // "httpMethod": "GET", + // "id": "directory.mobiledevices.list", + // "parameterOrder": [ + // "customerId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "100", + // "description": "Maximum number of results to return. Max allowed value is 100.", + // "format": "int32", + // "location": "query", + // "maximum": "100", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Device property to use for sorting results.", + // "enum": [ + // "deviceId", + // "email", + // "lastSync", + // "model", + // "name", + // "os", + // "status", + // "type" + // ], + // "enumDescriptions": [ + // "The serial number for a Google Sync mobile device. For Android devices, this is a software generated unique identifier.", + // "The device owner's email address.", + // "Last policy settings sync date time of the device.", + // "The mobile device's model.", + // "The device owner's user name.", + // "The device's operating system.", + // "The device status.", + // "Type of the device." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Token to specify next page in the list", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Restrict information returned to a set of selected fields.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Includes only the basic metadata fields (e.g., deviceId, model, status, type, and status)", + // "Includes all metadata fields" + // ], + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "Search string in the format given at https://developers.google.com/admin-sdk/directory/v1/search-operators", + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Whether to return results in ascending or descending order. Must be used with the `orderBy` parameter.", + // "enum": [ + // "ASCENDING", + // "DESCENDING" + // ], + // "enumDescriptions": [ + // "Ascending order.", + // "Descending order." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/devices/mobile", + // "response": { + // "$ref": "MobileDevices" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.device.mobile", + // "https://www.googleapis.com/auth/admin.directory.device.mobile.action", + // "https://www.googleapis.com/auth/admin.directory.device.mobile.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MobiledevicesListCall) Pages(ctx context.Context, f func(*MobileDevices) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.orgunits.delete": + +type OrgunitsDeleteCall struct { + s *Service + customerId string + orgUnitPath string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes an organizational unit. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - orgUnitPath: The full path of the organizational unit (minus the +// leading `/`) or its unique ID. +func (r *OrgunitsService) Delete(customerId string, orgUnitPath string) *OrgunitsDeleteCall { + c := &OrgunitsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.orgUnitPath = orgUnitPath + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsDeleteCall) Fields(s ...googleapi.Field) *OrgunitsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
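+// +// A deadline sketch (the timeout value is illustrative; assumes the context and time packages are imported): +// +// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +// defer cancel() +// if err := c.Context(ctx).Do(); err != nil { +// // a canceled or expired ctx surfaces as an error here +// }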
+func (c *OrgunitsDeleteCall) Context(ctx context.Context) *OrgunitsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrgunitsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "orgUnitPath": c.orgUnitPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.delete" call. +func (c *OrgunitsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes an organizational unit.", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + // "httpMethod": "DELETE", + // "id": "directory.orgunits.delete", + // "parameterOrder": [ + // "customerId", + // "orgUnitPath" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + // "location": "path", + // "pattern": "^.*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit" + // ] + // } + +} + +// method id "directory.orgunits.get": + +type OrgunitsGetCall struct { + s *Service + customerId string + orgUnitPath string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an organizational unit. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - orgUnitPath: The full path of the organizational unit (minus the +// leading `/`) or its unique ID. 
+func (r *OrgunitsService) Get(customerId string, orgUnitPath string) *OrgunitsGetCall { + c := &OrgunitsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.orgUnitPath = orgUnitPath + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsGetCall) Fields(s ...googleapi.Field) *OrgunitsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *OrgunitsGetCall) IfNoneMatch(entityTag string) *OrgunitsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrgunitsGetCall) Context(ctx context.Context) *OrgunitsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrgunitsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "orgUnitPath": c.orgUnitPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.get" call. +// Exactly one of *OrgUnit or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *OrgUnit.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrgunitsGetCall) Do(opts ...googleapi.CallOption) (*OrgUnit, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &OrgUnit{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an organizational unit.", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + // "httpMethod": "GET", + // "id": "directory.orgunits.get", + // "parameterOrder": [ + // "customerId", + // "orgUnitPath" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + // "location": "path", + // "pattern": "^.*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + // "response": { + // "$ref": "OrgUnit" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit", + // "https://www.googleapis.com/auth/admin.directory.orgunit.readonly" + // ] + // } + +} + +// method id "directory.orgunits.insert": + +type OrgunitsInsertCall struct { + s *Service + customerId string + orgunit *OrgUnit + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Adds an organizational unit. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +func (r *OrgunitsService) Insert(customerId string, orgunit *OrgUnit) *OrgunitsInsertCall { + c := &OrgunitsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.orgunit = orgunit + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsInsertCall) Fields(s ...googleapi.Field) *OrgunitsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrgunitsInsertCall) Context(ctx context.Context) *OrgunitsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
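+// +// For example, to attach an extra header before calling Do (the header name and value are purely illustrative): +// +// c.Header().Set("X-Example-Trace", "debug-1")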
+func (c *OrgunitsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.insert" call. +// Exactly one of *OrgUnit or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *OrgUnit.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrgunitsInsertCall) Do(opts ...googleapi.CallOption) (*OrgUnit, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &OrgUnit{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an organizational unit.", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits", + // "httpMethod": "POST", + // "id": "directory.orgunits.insert", + // "parameterOrder": [ + // "customerId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits", + // "request": { + // "$ref": "OrgUnit" + // }, + // "response": { + // "$ref": "OrgUnit" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit" + // ] + // } + +} + +// method id "directory.orgunits.list": + +type OrgunitsListCall struct { + s *Service + customerId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of all organizational units for an account. 
+// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +func (r *OrgunitsService) List(customerId string) *OrgunitsListCall { + c := &OrgunitsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + return c +} + +// OrgUnitPath sets the optional parameter "orgUnitPath": The full path +// to the organizational unit or its unique ID. Returns the children of +// the specified organizational unit. +func (c *OrgunitsListCall) OrgUnitPath(orgUnitPath string) *OrgunitsListCall { + c.urlParams_.Set("orgUnitPath", orgUnitPath) + return c +} + +// Type sets the optional parameter "type": Whether to return all +// sub-organizations or just immediate children. +// +// Possible values: +// +// "all" - All sub-organizational units. +// "children" - Immediate children only (default). +func (c *OrgunitsListCall) Type(type_ string) *OrgunitsListCall { + c.urlParams_.Set("type", type_) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsListCall) Fields(s ...googleapi.Field) *OrgunitsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *OrgunitsListCall) IfNoneMatch(entityTag string) *OrgunitsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrgunitsListCall) Context(ctx context.Context) *OrgunitsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrgunitsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.list" call. +// Exactly one of *OrgUnits or error will be non-nil. Any non-2xx status +// code is an error.
Response headers are in either +// *OrgUnits.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrgunitsListCall) Do(opts ...googleapi.CallOption) (*OrgUnits, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &OrgUnits{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of all organizational units for an account.", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits", + // "httpMethod": "GET", + // "id": "directory.orgunits.list", + // "parameterOrder": [ + // "customerId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "default": "", + // "description": "The full path to the organizational unit or its unique ID. Returns the children of the specified organizational unit.", + // "location": "query", + // "type": "string" + // }, + // "type": { + // "description": "Whether to return all sub-organizations or just immediate children.", + // "enum": [ + // "all", + // "children" + // ], + // "enumDescriptions": [ + // "All sub-organizational units.", + // "Immediate children only (default)." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits", + // "response": { + // "$ref": "OrgUnits" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit", + // "https://www.googleapis.com/auth/admin.directory.orgunit.readonly" + // ] + // } + +} + +// method id "directory.orgunits.patch": + +type OrgunitsPatchCall struct { + s *Service + customerId string + orgUnitPath string + orgunit *OrgUnit + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an organizational unit. This method supports patch +// semantics (/admin-sdk/directory/v1/guides/performance#patch) +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - orgUnitPath: The full path of the organizational unit (minus the +// leading `/`) or its unique ID. 
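+// +// A patch sketch (svc, the path, and the new description are assumptions; with patch semantics only the supplied fields are updated): +// +// ou, err := svc.Orgunits.Patch("my_customer", "corp/engineering", &admin.OrgUnit{ +// Description: "Engineering org unit", +// }).Do()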
+func (r *OrgunitsService) Patch(customerId string, orgUnitPath string, orgunit *OrgUnit) *OrgunitsPatchCall { + c := &OrgunitsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.orgUnitPath = orgUnitPath + c.orgunit = orgunit + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsPatchCall) Fields(s ...googleapi.Field) *OrgunitsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrgunitsPatchCall) Context(ctx context.Context) *OrgunitsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrgunitsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "orgUnitPath": c.orgUnitPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.patch" call. +// Exactly one of *OrgUnit or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *OrgUnit.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrgunitsPatchCall) Do(opts ...googleapi.CallOption) (*OrgUnit, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &OrgUnit{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an organizational unit. 
This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch)", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + // "httpMethod": "PATCH", + // "id": "directory.orgunits.patch", + // "parameterOrder": [ + // "customerId", + // "orgUnitPath" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + // "location": "path", + // "pattern": "^.*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + // "request": { + // "$ref": "OrgUnit" + // }, + // "response": { + // "$ref": "OrgUnit" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit" + // ] + // } + +} + +// method id "directory.orgunits.update": + +type OrgunitsUpdateCall struct { + s *Service + customerId string + orgUnitPath string + orgunit *OrgUnit + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an organizational unit. +// +// - customerId: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's `customerId`. The +// `customerId` is also returned as part of the Users resource +// (/admin-sdk/directory/v1/reference/users). +// - orgUnitPath: The full path of the organizational unit (minus the +// leading `/`) or its unique ID. +func (r *OrgunitsService) Update(customerId string, orgUnitPath string, orgunit *OrgUnit) *OrgunitsUpdateCall { + c := &OrgunitsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.orgUnitPath = orgUnitPath + c.orgunit = orgunit + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrgunitsUpdateCall) Fields(s ...googleapi.Field) *OrgunitsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrgunitsUpdateCall) Context(ctx context.Context) *OrgunitsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrgunitsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrgunitsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.orgunit) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "orgUnitPath": c.orgUnitPath, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.orgunits.update" call. +// Exactly one of *OrgUnit or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *OrgUnit.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *OrgunitsUpdateCall) Do(opts ...googleapi.CallOption) (*OrgUnit, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &OrgUnit{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an organizational unit.", + // "flatPath": "admin/directory/v1/customer/{customerId}/orgunits/{orgunitsId}", + // "httpMethod": "PUT", + // "id": "directory.orgunits.update", + // "parameterOrder": [ + // "customerId", + // "orgUnitPath" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's `customerId`. 
The `customerId` is also returned as part of the [Users resource](/admin-sdk/directory/v1/reference/users).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orgUnitPath": { + // "description": "The full path of the organizational unit (minus the leading `/`) or its unique ID.", + // "location": "path", + // "pattern": "^.*$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/orgunits/{+orgUnitPath}", + // "request": { + // "$ref": "OrgUnit" + // }, + // "response": { + // "$ref": "OrgUnit" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.orgunit" + // ] + // } + +} + +// method id "directory.privileges.list": + +type PrivilegesListCall struct { + s *Service + customer string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of all privileges for a customer. +// +// - customer: The unique ID for the customer's Google Workspace +// account. In case of a multi-domain account, to fetch all groups for +// a customer, use this field instead of `domain`. You can also use +// the `my_customer` alias to represent your account's `customerId`. +// The `customerId` is also returned as part of the Users +// (/admin-sdk/directory/v1/reference/users) resource. You must +// provide either the `customer` or the `domain` parameter. +func (r *PrivilegesService) List(customer string) *PrivilegesListCall { + c := &PrivilegesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PrivilegesListCall) Fields(s ...googleapi.Field) *PrivilegesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *PrivilegesListCall) IfNoneMatch(entityTag string) *PrivilegesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PrivilegesListCall) Context(ctx context.Context) *PrivilegesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PrivilegesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PrivilegesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles/ALL/privileges") + urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.privileges.list" call. +// Exactly one of *Privileges or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Privileges.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PrivilegesListCall) Do(opts ...googleapi.CallOption) (*Privileges, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Privileges{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of all privileges for a customer.", + // "flatPath": "admin/directory/v1/customer/{customer}/roles/ALL/privileges", + // "httpMethod": "GET", + // "id": "directory.privileges.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/roles/ALL/privileges", + // "response": { + // "$ref": "Privileges" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.rolemanagement", + // "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + // ] + // } + +} + +// method id "directory.resources.buildings.delete": + +type ResourcesBuildingsDeleteCall struct { + s *Service + customer string + buildingId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a building. +// +// - buildingId: The id of the building to delete. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) Delete(customer string, buildingId string) *ResourcesBuildingsDeleteCall { + c := &ResourcesBuildingsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.buildingId = buildingId + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesBuildingsDeleteCall) Fields(s ...googleapi.Field) *ResourcesBuildingsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsDeleteCall) Context(ctx context.Context) *ResourcesBuildingsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "buildingId": c.buildingId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.delete" call. +func (c *ResourcesBuildingsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes a building.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "httpMethod": "DELETE", + // "id": "directory.resources.buildings.delete", + // "parameterOrder": [ + // "customer", + // "buildingId" + // ], + // "parameters": { + // "buildingId": { + // "description": "The id of the building to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.buildings.get": + +type ResourcesBuildingsGetCall struct { + s *Service + customer string + buildingId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a building. +// +// - buildingId: The unique ID of the building to retrieve. +// - customer: The unique ID for the customer's Google Workspace +// account. 
As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) Get(customer string, buildingId string) *ResourcesBuildingsGetCall { + c := &ResourcesBuildingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.buildingId = buildingId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesBuildingsGetCall) Fields(s ...googleapi.Field) *ResourcesBuildingsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ResourcesBuildingsGetCall) IfNoneMatch(entityTag string) *ResourcesBuildingsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsGetCall) Context(ctx context.Context) *ResourcesBuildingsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "buildingId": c.buildingId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.get" call. +// Exactly one of *Building or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Building.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcesBuildingsGetCall) Do(opts ...googleapi.CallOption) (*Building, error) { + gensupport.SetOptions(c.urlParams_, opts...)
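+	// Reviewer's note: an illustrative sketch, not generator output. It shows
+	// how this call is typically driven from client code, assuming an
+	// authenticated *Service named srv and a previously saved ETag string
+	// savedETag (both placeholders):
+	//
+	//	b, err := srv.Resources.Buildings.Get("my_customer", "bldg-42").
+	//		IfNoneMatch(savedETag).Do()
+	//	if googleapi.IsNotModified(err) {
+	//		// The building is unchanged since savedETag; reuse the cached copy.
+	//	} else if err != nil {
+	//		// Handle the error.
+	//	} else {
+	//		_ = b // Fresh copy of the building.
+	//	}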
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Building{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a building.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "httpMethod": "GET", + // "id": "directory.resources.buildings.get", + // "parameterOrder": [ + // "customer", + // "buildingId" + // ], + // "parameters": { + // "buildingId": { + // "description": "The unique ID of the building to retrieve.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "response": { + // "$ref": "Building" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// method id "directory.resources.buildings.insert": + +type ResourcesBuildingsInsertCall struct { + s *Service + customer string + building *Building + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Inserts a building. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) Insert(customer string, building *Building) *ResourcesBuildingsInsertCall { + c := &ResourcesBuildingsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.building = building + return c +} + +// CoordinatesSource sets the optional parameter "coordinatesSource": +// Source from which Building.coordinates are derived. +// +// Possible values: +// +// "CLIENT_SPECIFIED" - Building.coordinates are set to the +// +// coordinates included in the request. +// +// "RESOLVED_FROM_ADDRESS" - Building.coordinates are automatically +// +// populated based on the postal address. +// +// "SOURCE_UNSPECIFIED" (default) - Defaults to +// +// `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, +// defaults to `CLIENT_SPECIFIED` if coordinates are provided. +func (c *ResourcesBuildingsInsertCall) CoordinatesSource(coordinatesSource string) *ResourcesBuildingsInsertCall { + c.urlParams_.Set("coordinatesSource", coordinatesSource) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ResourcesBuildingsInsertCall) Fields(s ...googleapi.Field) *ResourcesBuildingsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsInsertCall) Context(ctx context.Context) *ResourcesBuildingsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.building) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.insert" call. +// Exactly one of *Building or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Building.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcesBuildingsInsertCall) Do(opts ...googleapi.CallOption) (*Building, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
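+	// Reviewer's note: a minimal usage sketch, not generator output. It
+	// assumes an authenticated *Service named srv; the customer ID and
+	// Building field values are placeholders. CoordinatesSource takes one of
+	// the enum strings documented above:
+	//
+	//	b := &Building{
+	//		BuildingId:   "hq-1",
+	//		BuildingName: "Headquarters",
+	//		FloorNames:   []string{"1", "2"},
+	//	}
+	//	created, err := srv.Resources.Buildings.Insert("my_customer", b).
+	//		CoordinatesSource("RESOLVED_FROM_ADDRESS").Do()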
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Building{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a building.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings", + // "httpMethod": "POST", + // "id": "directory.resources.buildings.insert", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "coordinatesSource": { + // "default": "SOURCE_UNSPECIFIED", + // "description": "Source from which Building.coordinates are derived.", + // "enum": [ + // "CLIENT_SPECIFIED", + // "RESOLVED_FROM_ADDRESS", + // "SOURCE_UNSPECIFIED" + // ], + // "enumDescriptions": [ + // "Building.coordinates are set to the coordinates included in the request.", + // "Building.coordinates are automatically populated based on the postal address.", + // "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + // ], + // "location": "query", + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings", + // "request": { + // "$ref": "Building" + // }, + // "response": { + // "$ref": "Building" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.buildings.list": + +type ResourcesBuildingsListCall struct { + s *Service + customer string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of buildings for an account. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) List(customer string) *ResourcesBuildingsListCall { + c := &ResourcesBuildingsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *ResourcesBuildingsListCall) MaxResults(maxResults int64) *ResourcesBuildingsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// the next page in the list. +func (c *ResourcesBuildingsListCall) PageToken(pageToken string) *ResourcesBuildingsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesBuildingsListCall) Fields(s ...googleapi.Field) *ResourcesBuildingsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ResourcesBuildingsListCall) IfNoneMatch(entityTag string) *ResourcesBuildingsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsListCall) Context(ctx context.Context) *ResourcesBuildingsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.list" call. +// Exactly one of *Buildings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Buildings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcesBuildingsListCall) Do(opts ...googleapi.CallOption) (*Buildings, error) { + gensupport.SetOptions(c.urlParams_, opts...)
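+	// Reviewer's note: an illustrative sketch, not generator output. For
+	// multi-page listings, the Pages helper defined after this method is
+	// simpler than driving PageToken/NextPageToken by hand. srv and ctx are
+	// assumed to be an authenticated *Service and a context.Context, and the
+	// iteration uses the Buildings field of the response struct:
+	//
+	//	err := srv.Resources.Buildings.List("my_customer").MaxResults(100).
+	//		Pages(ctx, func(page *Buildings) error {
+	//			for _, b := range page.Buildings {
+	//				fmt.Println(b.BuildingId, b.BuildingName)
+	//			}
+	//			return nil
+	//		})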
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Buildings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of buildings for an account.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings", + // "httpMethod": "GET", + // "id": "directory.resources.buildings.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to specify the next page in the list.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings", + // "response": { + // "$ref": "Buildings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcesBuildingsListCall) Pages(ctx context.Context, f func(*Buildings) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.resources.buildings.patch": + +type ResourcesBuildingsPatchCall struct { + s *Service + customer string + buildingId string + building *Building + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a building. +// +// - buildingId: The id of the building to update. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) Patch(customer string, buildingId string, building *Building) *ResourcesBuildingsPatchCall { + c := &ResourcesBuildingsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.buildingId = buildingId + c.building = building + return c +} + +// CoordinatesSource sets the optional parameter "coordinatesSource": +// Source from which Building.coordinates are derived. 
+// +// Possible values: +// +// "CLIENT_SPECIFIED" - Building.coordinates are set to the +// +// coordinates included in the request. +// +// "RESOLVED_FROM_ADDRESS" - Building.coordinates are automatically +// +// populated based on the postal address. +// +// "SOURCE_UNSPECIFIED" (default) - Defaults to +// +// `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, +// defaults to `CLIENT_SPECIFIED` if coordinates are provided. +func (c *ResourcesBuildingsPatchCall) CoordinatesSource(coordinatesSource string) *ResourcesBuildingsPatchCall { + c.urlParams_.Set("coordinatesSource", coordinatesSource) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesBuildingsPatchCall) Fields(s ...googleapi.Field) *ResourcesBuildingsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsPatchCall) Context(ctx context.Context) *ResourcesBuildingsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.building) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "buildingId": c.buildingId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.patch" call. +// Exactly one of *Building or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Building.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcesBuildingsPatchCall) Do(opts ...googleapi.CallOption) (*Building, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
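+	// Reviewer's note: an illustrative sketch, not generator output. Because
+	// unset struct fields are omitted from the JSON request body, a sparse
+	// Building is enough for PATCH; only the fields actually sent are
+	// updated. srv and the IDs are placeholders:
+	//
+	//	patched, err := srv.Resources.Buildings.Patch("my_customer", "bldg-42",
+	//		&Building{BuildingName: "West Annex"}).Do()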
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Building{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a building.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "httpMethod": "PATCH", + // "id": "directory.resources.buildings.patch", + // "parameterOrder": [ + // "customer", + // "buildingId" + // ], + // "parameters": { + // "buildingId": { + // "description": "The id of the building to update.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "coordinatesSource": { + // "default": "SOURCE_UNSPECIFIED", + // "description": "Source from which Building.coordinates are derived.", + // "enum": [ + // "CLIENT_SPECIFIED", + // "RESOLVED_FROM_ADDRESS", + // "SOURCE_UNSPECIFIED" + // ], + // "enumDescriptions": [ + // "Building.coordinates are set to the coordinates included in the request.", + // "Building.coordinates are automatically populated based on the postal address.", + // "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + // ], + // "location": "query", + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "request": { + // "$ref": "Building" + // }, + // "response": { + // "$ref": "Building" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.buildings.update": + +type ResourcesBuildingsUpdateCall struct { + s *Service + customer string + buildingId string + building *Building + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a building. +// +// - buildingId: The id of the building to update. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesBuildingsService) Update(customer string, buildingId string, building *Building) *ResourcesBuildingsUpdateCall { + c := &ResourcesBuildingsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.buildingId = buildingId + c.building = building + return c +} + +// CoordinatesSource sets the optional parameter "coordinatesSource": +// Source from which Building.coordinates are derived. +// +// Possible values: +// +// "CLIENT_SPECIFIED" - Building.coordinates are set to the +// +// coordinates included in the request. 
+// +// "RESOLVED_FROM_ADDRESS" - Building.coordinates are automatically +// +// populated based on the postal address. +// +// "SOURCE_UNSPECIFIED" (default) - Defaults to +// +// `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, +// defaults to `CLIENT_SPECIFIED` if coordinates are provided. +func (c *ResourcesBuildingsUpdateCall) CoordinatesSource(coordinatesSource string) *ResourcesBuildingsUpdateCall { + c.urlParams_.Set("coordinatesSource", coordinatesSource) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesBuildingsUpdateCall) Fields(s ...googleapi.Field) *ResourcesBuildingsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesBuildingsUpdateCall) Context(ctx context.Context) *ResourcesBuildingsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesBuildingsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesBuildingsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.building) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "buildingId": c.buildingId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.buildings.update" call. +// Exactly one of *Building or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Building.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcesBuildingsUpdateCall) Do(opts ...googleapi.CallOption) (*Building, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Building{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a building.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "httpMethod": "PUT", + // "id": "directory.resources.buildings.update", + // "parameterOrder": [ + // "customer", + // "buildingId" + // ], + // "parameters": { + // "buildingId": { + // "description": "The id of the building to update.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "coordinatesSource": { + // "default": "SOURCE_UNSPECIFIED", + // "description": "Source from which Building.coordinates are derived.", + // "enum": [ + // "CLIENT_SPECIFIED", + // "RESOLVED_FROM_ADDRESS", + // "SOURCE_UNSPECIFIED" + // ], + // "enumDescriptions": [ + // "Building.coordinates are set to the coordinates included in the request.", + // "Building.coordinates are automatically populated based on the postal address.", + // "Defaults to `RESOLVED_FROM_ADDRESS` if postal address is provided. Otherwise, defaults to `CLIENT_SPECIFIED` if coordinates are provided." + // ], + // "location": "query", + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/buildings/{buildingId}", + // "request": { + // "$ref": "Building" + // }, + // "response": { + // "$ref": "Building" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.calendars.delete": + +type ResourcesCalendarsDeleteCall struct { + s *Service + customer string + calendarResourceId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a calendar resource. +// +// - calendarResourceId: The unique ID of the calendar resource to +// delete. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) Delete(customer string, calendarResourceId string) *ResourcesCalendarsDeleteCall { + c := &ResourcesCalendarsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.calendarResourceId = calendarResourceId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ResourcesCalendarsDeleteCall) Fields(s ...googleapi.Field) *ResourcesCalendarsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsDeleteCall) Context(ctx context.Context) *ResourcesCalendarsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesCalendarsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "calendarResourceId": c.calendarResourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.delete" call. +func (c *ResourcesCalendarsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes a calendar resource.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "httpMethod": "DELETE", + // "id": "directory.resources.calendars.delete", + // "parameterOrder": [ + // "customer", + // "calendarResourceId" + // ], + // "parameters": { + // "calendarResourceId": { + // "description": "The unique ID of the calendar resource to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.calendars.get": + +type ResourcesCalendarsGetCall struct { + s *Service + customer string + calendarResourceId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a calendar resource. +// +// - calendarResourceId: The unique ID of the calendar resource to +// retrieve. +// - customer: The unique ID for the customer's Google Workspace +// account. 
As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) Get(customer string, calendarResourceId string) *ResourcesCalendarsGetCall { + c := &ResourcesCalendarsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.calendarResourceId = calendarResourceId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesCalendarsGetCall) Fields(s ...googleapi.Field) *ResourcesCalendarsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ResourcesCalendarsGetCall) IfNoneMatch(entityTag string) *ResourcesCalendarsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsGetCall) Context(ctx context.Context) *ResourcesCalendarsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesCalendarsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "calendarResourceId": c.calendarResourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.get" call. +// Exactly one of *CalendarResource or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *CalendarResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcesCalendarsGetCall) Do(opts ...googleapi.CallOption) (*CalendarResource, error) { + gensupport.SetOptions(c.urlParams_, opts...)
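+	// Reviewer's note: an illustrative sketch, not generator output,
+	// assuming an authenticated *Service named srv; the IDs and the printed
+	// fields are placeholders:
+	//
+	//	cr, err := srv.Resources.Calendars.Get("my_customer", "res-123").Do()
+	//	if err != nil {
+	//		// Handle the error.
+	//	}
+	//	fmt.Println(cr.ResourceName, cr.ResourceEmail)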
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CalendarResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a calendar resource.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "httpMethod": "GET", + // "id": "directory.resources.calendars.get", + // "parameterOrder": [ + // "customer", + // "calendarResourceId" + // ], + // "parameters": { + // "calendarResourceId": { + // "description": "The unique ID of the calendar resource to retrieve.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "response": { + // "$ref": "CalendarResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// method id "directory.resources.calendars.insert": + +type ResourcesCalendarsInsertCall struct { + s *Service + customer string + calendarresource *CalendarResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Inserts a calendar resource. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) Insert(customer string, calendarresource *CalendarResource) *ResourcesCalendarsInsertCall { + c := &ResourcesCalendarsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.calendarresource = calendarresource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesCalendarsInsertCall) Fields(s ...googleapi.Field) *ResourcesCalendarsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsInsertCall) Context(ctx context.Context) *ResourcesCalendarsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ResourcesCalendarsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.insert" call. +// Exactly one of *CalendarResource or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *CalendarResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcesCalendarsInsertCall) Do(opts ...googleapi.CallOption) (*CalendarResource, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CalendarResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a calendar resource.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars", + // "httpMethod": "POST", + // "id": "directory.resources.calendars.insert", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars", + // "request": { + // "$ref": "CalendarResource" + // }, + // "response": { + // "$ref": "CalendarResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.calendars.list": + +type ResourcesCalendarsListCall struct { + s *Service + customer string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of calendar resources for an account. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) List(customer string) *ResourcesCalendarsListCall { + c := &ResourcesCalendarsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *ResourcesCalendarsListCall) MaxResults(maxResults int64) *ResourcesCalendarsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Field(s) to sort +// results by in either ascending or descending order. Supported fields +// include `resourceId`, `resourceName`, `capacity`, `buildingId`, and +// `floorName`. If no order is specified, defaults to ascending. Should +// be of the form "field [asc|desc], field [asc|desc], ...". For example +// `buildingId, capacity desc` would return results sorted first by +// `buildingId` in ascending order then by `capacity` in descending +// order. +func (c *ResourcesCalendarsListCall) OrderBy(orderBy string) *ResourcesCalendarsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// the next page in the list. +func (c *ResourcesCalendarsListCall) PageToken(pageToken string) *ResourcesCalendarsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Query sets the optional parameter "query": String query used to +// filter results. Should be of the form "field operator value" where +// field can be any of supported fields and operators can be any of +// supported operations. Operators include '=' for exact match, '!=' for +// mismatch and ':' for prefix match or HAS match where applicable. For +// prefix match, the value should always be followed by a *. Logical +// operators NOT and AND are supported (in this order of precedence). +// Supported fields include `generatedResourceName`, `name`, +// `buildingId`, `floor_name`, `capacity`, +// `featureInstances.feature.name`, `resourceEmail`, `resourceCategory`. +// For example `buildingId=US-NYC-9TH AND +// featureInstances.feature.name:Phone`. +func (c *ResourcesCalendarsListCall) Query(query string) *ResourcesCalendarsListCall { + c.urlParams_.Set("query", query) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ResourcesCalendarsListCall) Fields(s ...googleapi.Field) *ResourcesCalendarsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ResourcesCalendarsListCall) IfNoneMatch(entityTag string) *ResourcesCalendarsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsListCall) Context(ctx context.Context) *ResourcesCalendarsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesCalendarsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.list" call. +// Exactly one of *CalendarResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *CalendarResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcesCalendarsListCall) Do(opts ...googleapi.CallOption) (*CalendarResources, error) { + gensupport.SetOptions(c.urlParams_, opts...)
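+	// Reviewer's note: an illustrative sketch, not generator output. It
+	// combines the Query and OrderBy parameters documented above (the query
+	// string is the example from the parameter docs) with the Pages helper
+	// defined after this method; srv and ctx are assumed, and the iteration
+	// uses the Items field of the response struct:
+	//
+	//	err := srv.Resources.Calendars.List("my_customer").
+	//		Query("buildingId=US-NYC-9TH AND featureInstances.feature.name:Phone").
+	//		OrderBy("capacity desc").
+	//		Pages(ctx, func(page *CalendarResources) error {
+	//			for _, r := range page.Items {
+	//				fmt.Println(r.ResourceEmail)
+	//			}
+	//			return nil
+	//		})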
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CalendarResources{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of calendar resources for an account.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars", + // "httpMethod": "GET", + // "id": "directory.resources.calendars.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Field(s) to sort results by in either ascending or descending order. Supported fields include `resourceId`, `resourceName`, `capacity`, `buildingId`, and `floorName`. If no order is specified, defaults to ascending. Should be of the form \"field [asc|desc], field [asc|desc], ...\". For example `buildingId, capacity desc` would return results sorted first by `buildingId` in ascending order then by `capacity` in descending order.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Token to specify the next page in the list.", + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match, '!=' for mismatch and ':' for prefix match or HAS match where applicable. For prefix match, the value should always be followed by a *. Logical operators NOT and AND are supported (in this order of precedence). Supported fields include `generatedResourceName`, `name`, `buildingId`, `floor_name`, `capacity`, `featureInstances.feature.name`, `resourceEmail`, `resourceCategory`. For example `buildingId=US-NYC-9TH AND featureInstances.feature.name:Phone`.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars", + // "response": { + // "$ref": "CalendarResources" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
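+//
+// Editor's note (illustrative sketch, not generated code): Pages handles
+// the pageToken bookkeeping for you. Given an authenticated *Service srv
+// and a context.Context ctx:
+//
+//	err := srv.Resources.Calendars.List("my_customer").
+//		Pages(ctx, func(page *CalendarResources) error {
+//			for _, r := range page.Items {
+//				fmt.Println(r.ResourceEmail)
+//			}
+//			return nil // a non-nil error here stops the iteration
+//		})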
+func (c *ResourcesCalendarsListCall) Pages(ctx context.Context, f func(*CalendarResources) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.resources.calendars.patch": + +type ResourcesCalendarsPatchCall struct { + s *Service + customer string + calendarResourceId string + calendarresource *CalendarResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a calendar resource. +// +// - calendarResourceId: The unique ID of the calendar resource to +// update. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) Patch(customer string, calendarResourceId string, calendarresource *CalendarResource) *ResourcesCalendarsPatchCall { + c := &ResourcesCalendarsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.calendarResourceId = calendarResourceId + c.calendarresource = calendarresource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesCalendarsPatchCall) Fields(s ...googleapi.Field) *ResourcesCalendarsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsPatchCall) Context(ctx context.Context) *ResourcesCalendarsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesCalendarsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "calendarResourceId": c.calendarResourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.patch" call. +// Exactly one of *CalendarResource or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *CalendarResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcesCalendarsPatchCall) Do(opts ...googleapi.CallOption) (*CalendarResource, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CalendarResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a calendar resource.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "httpMethod": "PATCH", + // "id": "directory.resources.calendars.patch", + // "parameterOrder": [ + // "customer", + // "calendarResourceId" + // ], + // "parameters": { + // "calendarResourceId": { + // "description": "The unique ID of the calendar resource to update.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "request": { + // "$ref": "CalendarResource" + // }, + // "response": { + // "$ref": "CalendarResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.calendars.update": + +type ResourcesCalendarsUpdateCall struct { + s *Service + customer string + calendarResourceId string + calendarresource *CalendarResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a calendar resource. This method supports patch +// semantics, meaning you only need to include the fields you wish to +// update. Fields that are not present in the request will be preserved. +// +// - calendarResourceId: The unique ID of the calendar resource to +// update. +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesCalendarsService) Update(customer string, calendarResourceId string, calendarresource *CalendarResource) *ResourcesCalendarsUpdateCall { + c := &ResourcesCalendarsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.calendarResourceId = calendarResourceId + c.calendarresource = calendarresource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
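+//
+// Editor's note (illustrative sketch, not generated code): although Update
+// is sent as a PUT, the comment above notes it follows patch semantics, so
+// a sparse body leaves omitted fields untouched. "resource-id" is a
+// placeholder for a real calendar resource ID:
+//
+//	updated, err := srv.Resources.Calendars.Update("my_customer", "resource-id",
+//		&CalendarResource{ResourceName: "4th Floor Huddle Room"}).Do()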
+func (c *ResourcesCalendarsUpdateCall) Fields(s ...googleapi.Field) *ResourcesCalendarsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesCalendarsUpdateCall) Context(ctx context.Context) *ResourcesCalendarsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesCalendarsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesCalendarsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.calendarresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "calendarResourceId": c.calendarResourceId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.calendars.update" call. +// Exactly one of *CalendarResource or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *CalendarResource.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcesCalendarsUpdateCall) Do(opts ...googleapi.CallOption) (*CalendarResource, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CalendarResource{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a calendar resource. This method supports patch semantics, meaning you only need to include the fields you wish to update. 
Fields that are not present in the request will be preserved.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "httpMethod": "PUT", + // "id": "directory.resources.calendars.update", + // "parameterOrder": [ + // "customer", + // "calendarResourceId" + // ], + // "parameters": { + // "calendarResourceId": { + // "description": "The unique ID of the calendar resource to update.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/calendars/{calendarResourceId}", + // "request": { + // "$ref": "CalendarResource" + // }, + // "response": { + // "$ref": "CalendarResource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.features.delete": + +type ResourcesFeaturesDeleteCall struct { + s *Service + customer string + featureKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a feature. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +// - featureKey: The unique ID of the feature to delete. +func (r *ResourcesFeaturesService) Delete(customer string, featureKey string) *ResourcesFeaturesDeleteCall { + c := &ResourcesFeaturesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.featureKey = featureKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesFeaturesDeleteCall) Fields(s ...googleapi.Field) *ResourcesFeaturesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesFeaturesDeleteCall) Context(ctx context.Context) *ResourcesFeaturesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesFeaturesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features/{featureKey}") + urls += "?" 
+ c.urlParams_.Encode()
+	req, err := http.NewRequest("DELETE", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer":   c.customer,
+		"featureKey": c.featureKey,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.resources.features.delete" call.
+func (c *ResourcesFeaturesDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Deletes a feature.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}",
+	//   "httpMethod": "DELETE",
+	//   "id": "directory.resources.features.delete",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "featureKey"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "featureKey": {
+	//       "description": "The unique ID of the feature to delete.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.resource.calendar"
+	//   ]
+	// }
+
+}
+
+// method id "directory.resources.features.get":
+
+type ResourcesFeaturesGetCall struct {
+	s            *Service
+	customer     string
+	featureKey   string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Retrieves a feature.
+//
+// - customer: The unique ID for the customer's Google Workspace
+// account. As an account administrator, you can also use the
+// `my_customer` alias to represent your account's customer ID.
+// - featureKey: The unique ID of the feature to retrieve.
+func (r *ResourcesFeaturesService) Get(customer string, featureKey string) *ResourcesFeaturesGetCall {
+	c := &ResourcesFeaturesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.featureKey = featureKey
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ResourcesFeaturesGetCall) Fields(s ...googleapi.Field) *ResourcesFeaturesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ResourcesFeaturesGetCall) IfNoneMatch(entityTag string) *ResourcesFeaturesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
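+//
+// Editor's note (illustrative sketch, not generated code): the ETag flow
+// described under IfNoneMatch above, with etag saved from an earlier
+// response and "whiteboard" a placeholder feature key:
+//
+//	f, err := srv.Resources.Features.Get("my_customer", "whiteboard").
+//		IfNoneMatch(etag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached copy is still current; reuse it instead of f
+//	}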
+func (c *ResourcesFeaturesGetCall) Context(ctx context.Context) *ResourcesFeaturesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesFeaturesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features/{featureKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "featureKey": c.featureKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.features.get" call. +// Exactly one of *Feature or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Feature.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcesFeaturesGetCall) Do(opts ...googleapi.CallOption) (*Feature, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Feature{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a feature.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "httpMethod": "GET", + // "id": "directory.resources.features.get", + // "parameterOrder": [ + // "customer", + // "featureKey" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "featureKey": { + // "description": "The unique ID of the feature to retrieve.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "response": { + // "$ref": "Feature" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// method id "directory.resources.features.insert": + +type ResourcesFeaturesInsertCall struct { + s *Service + customer string + feature *Feature + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Inserts a feature. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesFeaturesService) Insert(customer string, feature *Feature) *ResourcesFeaturesInsertCall { + c := &ResourcesFeaturesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.feature = feature + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesFeaturesInsertCall) Fields(s ...googleapi.Field) *ResourcesFeaturesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesFeaturesInsertCall) Context(ctx context.Context) *ResourcesFeaturesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesFeaturesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.feature) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.features.insert" call. +// Exactly one of *Feature or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Feature.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcesFeaturesInsertCall) Do(opts ...googleapi.CallOption) (*Feature, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Feature{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a feature.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features", + // "httpMethod": "POST", + // "id": "directory.resources.features.insert", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features", + // "request": { + // "$ref": "Feature" + // }, + // "response": { + // "$ref": "Feature" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.features.list": + +type ResourcesFeaturesListCall struct { + s *Service + customer string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of features for an account. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +func (r *ResourcesFeaturesService) List(customer string) *ResourcesFeaturesListCall { + c := &ResourcesFeaturesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *ResourcesFeaturesListCall) MaxResults(maxResults int64) *ResourcesFeaturesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// the next page in the list. +func (c *ResourcesFeaturesListCall) PageToken(pageToken string) *ResourcesFeaturesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
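+//
+// Editor's note (illustrative sketch, not generated code): MaxResults and
+// PageToken above allow manual paging inside a helper function; the Pages
+// method defined further down automates the same loop:
+//
+//	call := srv.Resources.Features.List("my_customer").MaxResults(500)
+//	for {
+//		page, err := call.Do()
+//		if err != nil {
+//			return err
+//		}
+//		for _, f := range page.Features {
+//			fmt.Println(f.Name)
+//		}
+//		if page.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(page.NextPageToken)
+//	}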
+func (c *ResourcesFeaturesListCall) Fields(s ...googleapi.Field) *ResourcesFeaturesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ResourcesFeaturesListCall) IfNoneMatch(entityTag string) *ResourcesFeaturesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ResourcesFeaturesListCall) Context(ctx context.Context) *ResourcesFeaturesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ResourcesFeaturesListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ResourcesFeaturesListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer": c.customer,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.resources.features.list" call.
+// Exactly one of *Features or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Features.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ResourcesFeaturesListCall) Do(opts ...googleapi.CallOption) (*Features, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Features{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of features for an account.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features", + // "httpMethod": "GET", + // "id": "directory.resources.features.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to specify the next page in the list.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features", + // "response": { + // "$ref": "Features" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar", + // "https://www.googleapis.com/auth/admin.directory.resource.calendar.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcesFeaturesListCall) Pages(ctx context.Context, f func(*Features) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.resources.features.patch": + +type ResourcesFeaturesPatchCall struct { + s *Service + customer string + featureKey string + feature *Feature + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a feature. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +// - featureKey: The unique ID of the feature to update. +func (r *ResourcesFeaturesService) Patch(customer string, featureKey string, feature *Feature) *ResourcesFeaturesPatchCall { + c := &ResourcesFeaturesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.featureKey = featureKey + c.feature = feature + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
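+//
+// Editor's note (illustrative sketch, not generated code): Patch combined
+// with the Fields partial-response option documented above, so the reply
+// carries only the requested fields; "whiteboard" is a placeholder key:
+//
+//	f, err := srv.Resources.Features.Patch("my_customer", "whiteboard",
+//		&Feature{ /* fields to change */ }).
+//		Fields("name").
+//		Do()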
+func (c *ResourcesFeaturesPatchCall) Fields(s ...googleapi.Field) *ResourcesFeaturesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesFeaturesPatchCall) Context(ctx context.Context) *ResourcesFeaturesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesFeaturesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.feature) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features/{featureKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "featureKey": c.featureKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.features.patch" call. +// Exactly one of *Feature or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Feature.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcesFeaturesPatchCall) Do(opts ...googleapi.CallOption) (*Feature, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Feature{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a feature.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "httpMethod": "PATCH", + // "id": "directory.resources.features.patch", + // "parameterOrder": [ + // "customer", + // "featureKey" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "featureKey": { + // "description": "The unique ID of the feature to update.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "request": { + // "$ref": "Feature" + // }, + // "response": { + // "$ref": "Feature" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.features.rename": + +type ResourcesFeaturesRenameCall struct { + s *Service + customer string + oldName string + featurerename *FeatureRename + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rename: Renames a feature. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +// - oldName: The unique ID of the feature to rename. +func (r *ResourcesFeaturesService) Rename(customer string, oldName string, featurerename *FeatureRename) *ResourcesFeaturesRenameCall { + c := &ResourcesFeaturesRenameCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.oldName = oldName + c.featurerename = featurerename + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesFeaturesRenameCall) Fields(s ...googleapi.Field) *ResourcesFeaturesRenameCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesFeaturesRenameCall) Context(ctx context.Context) *ResourcesFeaturesRenameCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcesFeaturesRenameCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesRenameCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.featurerename) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features/{oldName}/rename") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "oldName": c.oldName, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.features.rename" call. 
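+//
+// Editor's note (illustrative sketch, not generated code): Rename takes the
+// feature's current name plus a FeatureRename body carrying the new one,
+// and its Do returns only an error:
+//
+//	err := srv.Resources.Features.Rename("my_customer", "Whiteboard",
+//		&FeatureRename{NewName: "Smart Whiteboard"}).Do()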
+func (c *ResourcesFeaturesRenameCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Renames a feature.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{oldName}/rename", + // "httpMethod": "POST", + // "id": "directory.resources.features.rename", + // "parameterOrder": [ + // "customer", + // "oldName" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "oldName": { + // "description": "The unique ID of the feature to rename.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features/{oldName}/rename", + // "request": { + // "$ref": "FeatureRename" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.resources.features.update": + +type ResourcesFeaturesUpdateCall struct { + s *Service + customer string + featureKey string + feature *Feature + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a feature. +// +// - customer: The unique ID for the customer's Google Workspace +// account. As an account administrator, you can also use the +// `my_customer` alias to represent your account's customer ID. +// - featureKey: The unique ID of the feature to update. +func (r *ResourcesFeaturesService) Update(customer string, featureKey string, feature *Feature) *ResourcesFeaturesUpdateCall { + c := &ResourcesFeaturesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.featureKey = featureKey + c.feature = feature + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcesFeaturesUpdateCall) Fields(s ...googleapi.Field) *ResourcesFeaturesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcesFeaturesUpdateCall) Context(ctx context.Context) *ResourcesFeaturesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
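+//
+// Editor's note (illustrative sketch, not generated code): a deadline can
+// be attached to any call through Context before invoking Do; "whiteboard"
+// is again a placeholder feature key:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	f, err := srv.Resources.Features.Update("my_customer", "whiteboard",
+//		&Feature{ /* new definition */ }).Context(ctx).Do()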
+func (c *ResourcesFeaturesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcesFeaturesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.feature) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/resources/features/{featureKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "featureKey": c.featureKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.resources.features.update" call. +// Exactly one of *Feature or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Feature.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcesFeaturesUpdateCall) Do(opts ...googleapi.CallOption) (*Feature, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Feature{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a feature.", + // "flatPath": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "httpMethod": "PUT", + // "id": "directory.resources.features.update", + // "parameterOrder": [ + // "customer", + // "featureKey" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. 
As an account administrator, you can also use the `my_customer` alias to represent your account's customer ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "featureKey": { + // "description": "The unique ID of the feature to update.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/resources/features/{featureKey}", + // "request": { + // "$ref": "Feature" + // }, + // "response": { + // "$ref": "Feature" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.resource.calendar" + // ] + // } + +} + +// method id "directory.roleAssignments.delete": + +type RoleAssignmentsDeleteCall struct { + s *Service + customer string + roleAssignmentId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a role assignment. +// +// - customer: Immutable ID of the Google Workspace account. +// - roleAssignmentId: Immutable ID of the role assignment. +func (r *RoleAssignmentsService) Delete(customer string, roleAssignmentId string) *RoleAssignmentsDeleteCall { + c := &RoleAssignmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.roleAssignmentId = roleAssignmentId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoleAssignmentsDeleteCall) Fields(s ...googleapi.Field) *RoleAssignmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoleAssignmentsDeleteCall) Context(ctx context.Context) *RoleAssignmentsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoleAssignmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoleAssignmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "roleAssignmentId": c.roleAssignmentId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roleAssignments.delete" call. +func (c *RoleAssignmentsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Deletes a role assignment.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}",
+	//   "httpMethod": "DELETE",
+	//   "id": "directory.roleAssignments.delete",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "roleAssignmentId"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "roleAssignmentId": {
+	//       "description": "Immutable ID of the role assignment.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement"
+	//   ]
+	// }
+
+}
+
+// method id "directory.roleAssignments.get":
+
+type RoleAssignmentsGetCall struct {
+	s                *Service
+	customer         string
+	roleAssignmentId string
+	urlParams_       gensupport.URLParams
+	ifNoneMatch_     string
+	ctx_             context.Context
+	header_          http.Header
+}
+
+// Get: Retrieves a role assignment.
+//
+// - customer: The unique ID for the customer's Google Workspace
+// account. In case of a multi-domain account, to fetch all groups for
+// a customer, use this field instead of `domain`. You can also use
+// the `my_customer` alias to represent your account's `customerId`.
+// The `customerId` is also returned as part of the Users
+// (/admin-sdk/directory/v1/reference/users) resource. You must
+// provide either the `customer` or the `domain` parameter.
+// - roleAssignmentId: Immutable ID of the role assignment.
+func (r *RoleAssignmentsService) Get(customer string, roleAssignmentId string) *RoleAssignmentsGetCall {
+	c := &RoleAssignmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.roleAssignmentId = roleAssignmentId
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoleAssignmentsGetCall) Fields(s ...googleapi.Field) *RoleAssignmentsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RoleAssignmentsGetCall) IfNoneMatch(entityTag string) *RoleAssignmentsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RoleAssignmentsGetCall) Context(ctx context.Context) *RoleAssignmentsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
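+//
+// Editor's note (illustrative sketch, not generated code): Header can carry
+// extra request headers, here a made-up tracing header and a placeholder
+// assignment ID:
+//
+//	call := srv.RoleAssignments.Get("my_customer", "12345")
+//	call.Header().Set("X-Debug-Tag", "audit-run")
+//	ra, err := call.Do()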
+func (c *RoleAssignmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoleAssignmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "roleAssignmentId": c.roleAssignmentId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roleAssignments.get" call. +// Exactly one of *RoleAssignment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RoleAssignment.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoleAssignmentsGetCall) Do(opts ...googleapi.CallOption) (*RoleAssignment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RoleAssignment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a role assignment.", + // "flatPath": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + // "httpMethod": "GET", + // "id": "directory.roleAssignments.get", + // "parameterOrder": [ + // "customer", + // "roleAssignmentId" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "roleAssignmentId": { + // "description": "Immutable ID of the role assignment.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/roleassignments/{roleAssignmentId}", + // "response": { + // "$ref": "RoleAssignment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.rolemanagement", + // "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + // ] + // } + +} + +// method id "directory.roleAssignments.insert": + +type RoleAssignmentsInsertCall struct { + s *Service + customer string + roleassignment *RoleAssignment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a role assignment. +// +// - customer: Immutable ID of the Google Workspace account. +func (r *RoleAssignmentsService) Insert(customer string, roleassignment *RoleAssignment) *RoleAssignmentsInsertCall { + c := &RoleAssignmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.roleassignment = roleassignment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoleAssignmentsInsertCall) Fields(s ...googleapi.Field) *RoleAssignmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoleAssignmentsInsertCall) Context(ctx context.Context) *RoleAssignmentsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoleAssignmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoleAssignmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.roleassignment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roleassignments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roleAssignments.insert" call. +// Exactly one of *RoleAssignment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RoleAssignment.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *RoleAssignmentsInsertCall) Do(opts ...googleapi.CallOption) (*RoleAssignment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RoleAssignment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a role assignment.", + // "flatPath": "admin/directory/v1/customer/{customer}/roleassignments", + // "httpMethod": "POST", + // "id": "directory.roleAssignments.insert", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "Immutable ID of the Google Workspace account.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/roleassignments", + // "request": { + // "$ref": "RoleAssignment" + // }, + // "response": { + // "$ref": "RoleAssignment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.rolemanagement" + // ] + // } + +} + +// method id "directory.roleAssignments.list": + +type RoleAssignmentsListCall struct { + s *Service + customer string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of all roleAssignments. +// +// - customer: The unique ID for the customer's Google Workspace +// account. In case of a multi-domain account, to fetch all groups for +// a customer, use this field instead of `domain`. You can also use +// the `my_customer` alias to represent your account's `customerId`. +// The `customerId` is also returned as part of the Users +// (/admin-sdk/directory/v1/reference/users) resource. You must +// provide either the `customer` or the `domain` parameter. +func (r *RoleAssignmentsService) List(customer string) *RoleAssignmentsListCall { + c := &RoleAssignmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + return c +} + +// IncludeIndirectRoleAssignments sets the optional parameter +// "includeIndirectRoleAssignments": When set to `true`, fetches +// indirect role assignments (i.e. role assignment via a group) as well +// as direct ones. Defaults to `false`. You must specify `user_key` or +// the indirect role assignments will not be included. +func (c *RoleAssignmentsListCall) IncludeIndirectRoleAssignments(includeIndirectRoleAssignments bool) *RoleAssignmentsListCall { + c.urlParams_.Set("includeIndirectRoleAssignments", fmt.Sprint(includeIndirectRoleAssignments)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *RoleAssignmentsListCall) MaxResults(maxResults int64) *RoleAssignmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// the next page in the list. 
+func (c *RoleAssignmentsListCall) PageToken(pageToken string) *RoleAssignmentsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// RoleId sets the optional parameter "roleId": Immutable ID of a role.
+// If included in the request, returns only role assignments containing
+// this role ID.
+func (c *RoleAssignmentsListCall) RoleId(roleId string) *RoleAssignmentsListCall {
+	c.urlParams_.Set("roleId", roleId)
+	return c
+}
+
+// UserKey sets the optional parameter "userKey": The primary email
+// address, alias email address, or unique user or group ID. If included
+// in the request, returns role assignments only for this user or group.
+func (c *RoleAssignmentsListCall) UserKey(userKey string) *RoleAssignmentsListCall {
+	c.urlParams_.Set("userKey", userKey)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoleAssignmentsListCall) Fields(s ...googleapi.Field) *RoleAssignmentsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RoleAssignmentsListCall) IfNoneMatch(entityTag string) *RoleAssignmentsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RoleAssignmentsListCall) Context(ctx context.Context) *RoleAssignmentsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RoleAssignmentsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RoleAssignmentsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roleassignments")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer": c.customer,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.roleAssignments.list" call.
+// Exactly one of *RoleAssignments or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *RoleAssignments.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *RoleAssignmentsListCall) Do(opts ...googleapi.CallOption) (*RoleAssignments, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RoleAssignments{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of all roleAssignments.", + // "flatPath": "admin/directory/v1/customer/{customer}/roleassignments", + // "httpMethod": "GET", + // "id": "directory.roleAssignments.list", + // "parameterOrder": [ + // "customer" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "includeIndirectRoleAssignments": { + // "description": "When set to `true`, fetches indirect role assignments (i.e. role assignment via a group) as well as direct ones. Defaults to `false`. You must specify `user_key` or the indirect role assignments will not be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "200", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to specify the next page in the list.", + // "location": "query", + // "type": "string" + // }, + // "roleId": { + // "description": "Immutable ID of a role. If included in the request, returns only role assignments containing this role ID.", + // "location": "query", + // "type": "string" + // }, + // "userKey": { + // "description": "The primary email address, alias email address, or unique user or group ID. If included in the request, returns role assignments only for this user or group.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/roleassignments", + // "response": { + // "$ref": "RoleAssignments" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.rolemanagement", + // "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RoleAssignmentsListCall) Pages(ctx context.Context, f func(*RoleAssignments) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.roles.delete": + +type RolesDeleteCall struct { + s *Service + customer string + roleId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a role. +// +// - customer: Immutable ID of the Google Workspace account. +// - roleId: Immutable ID of the role. +func (r *RolesService) Delete(customer string, roleId string) *RolesDeleteCall { + c := &RolesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.roleId = roleId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RolesDeleteCall) Fields(s ...googleapi.Field) *RolesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RolesDeleteCall) Context(ctx context.Context) *RolesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RolesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RolesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles/{roleId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "roleId": c.roleId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roles.delete" call. +func (c *RolesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
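+	// Editorial note, not generated output: the rest of this method follows
+	// the usual generated pattern: doRequest issues the DELETE, and
+	// googleapi.CheckResponse turns any non-2xx status into an error, so a
+	// nil return means the role was deleted. A minimal usage sketch, with
+	// srv assumed to be an *admin.Service built via NewService and roleId a
+	// hypothetical role ID:
+	//
+	//	if err := srv.Roles.Delete("my_customer", roleId).Do(); err != nil {
+	//		log.Fatal(err)
+	//	}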
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Deletes a role.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "httpMethod": "DELETE",
+	//   "id": "directory.roles.delete",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "roleId"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "roleId": {
+	//       "description": "Immutable ID of the role.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement"
+	//   ]
+	// }
+
+}
+
+// method id "directory.roles.get":
+
+type RolesGetCall struct {
+	s            *Service
+	customer     string
+	roleId       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Retrieves a role.
+//
+// - customer: The unique ID for the customer's Google Workspace
+//   account. In case of a multi-domain account, to fetch all groups for
+//   a customer, use this field instead of `domain`. You can also use
+//   the `my_customer` alias to represent your account's `customerId`.
+//   The `customerId` is also returned as part of the Users
+//   (/admin-sdk/directory/v1/reference/users) resource. You must
+//   provide either the `customer` or the `domain` parameter.
+// - roleId: Immutable ID of the role.
+func (r *RolesService) Get(customer string, roleId string) *RolesGetCall {
+	c := &RolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.roleId = roleId
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RolesGetCall) Fields(s ...googleapi.Field) *RolesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RolesGetCall) IfNoneMatch(entityTag string) *RolesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RolesGetCall) Context(ctx context.Context) *RolesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RolesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RolesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles/{roleId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "roleId": c.roleId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roles.get" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a role.", + // "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}", + // "httpMethod": "GET", + // "id": "directory.roles.get", + // "parameterOrder": [ + // "customer", + // "roleId" + // ], + // "parameters": { + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "roleId": { + // "description": "Immutable ID of the role.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customer}/roles/{roleId}", + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.rolemanagement", + // "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly" + // ] + // } + +} + +// method id "directory.roles.insert": + +type RolesInsertCall struct { + s *Service + customer string + role *Role + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a role. +// +// - customer: Immutable ID of the Google Workspace account. +func (r *RolesService) Insert(customer string, role *Role) *RolesInsertCall { + c := &RolesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customer = customer + c.role = role + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RolesInsertCall) Fields(s ...googleapi.Field) *RolesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RolesInsertCall) Context(ctx context.Context) *RolesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RolesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RolesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roles.insert" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RolesInsertCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
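+	// Editorial note, not generated output: a hedged sketch of creating a
+	// role with this call. The role name and description are invented for
+	// the example; srv is assumed to be an *admin.Service.
+	//
+	//	role := &admin.Role{
+	//		RoleName:        "Example Auditor",
+	//		RoleDescription: "Read-only role created for illustration",
+	//	}
+	//	created, err := srv.Roles.Insert("my_customer", role).Do()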
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Role{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a role.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roles",
+	//   "httpMethod": "POST",
+	//   "id": "directory.roles.insert",
+	//   "parameterOrder": [
+	//     "customer"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roles",
+	//   "request": {
+	//     "$ref": "Role"
+	//   },
+	//   "response": {
+	//     "$ref": "Role"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement"
+	//   ]
+	// }
+
+}
+
+// method id "directory.roles.list":
+
+type RolesListCall struct {
+	s            *Service
+	customer     string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Retrieves a paginated list of all the roles in a domain.
+//
+// - customer: The unique ID for the customer's Google Workspace
+//   account. In case of a multi-domain account, to fetch all groups for
+//   a customer, use this field instead of `domain`. You can also use
+//   the `my_customer` alias to represent your account's `customerId`.
+//   The `customerId` is also returned as part of the Users
+//   (/admin-sdk/directory/v1/reference/users) resource. You must
+//   provide either the `customer` or the `domain` parameter.
+func (r *RolesService) List(customer string) *RolesListCall {
+	c := &RolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return.
+func (c *RolesListCall) MaxResults(maxResults int64) *RolesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Token to specify
+// the next page in the list.
+func (c *RolesListCall) PageToken(pageToken string) *RolesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RolesListCall) Fields(s ...googleapi.Field) *RolesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RolesListCall) IfNoneMatch(entityTag string) *RolesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RolesListCall) Context(ctx context.Context) *RolesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RolesListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RolesListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer": c.customer,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.roles.list" call.
+// Exactly one of *Roles or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Roles.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*Roles, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Roles{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a paginated list of all the roles in a domain.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roles",
+	//   "httpMethod": "GET",
+	//   "id": "directory.roles.list",
+	//   "parameterOrder": [
+	//     "customer"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "description": "Maximum number of results to return.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "maximum": "100",
+	//       "minimum": "1",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Token to specify the next page in the list.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roles",
+	//   "response": {
+	//     "$ref": "Roles"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement",
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement.readonly"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *RolesListCall) Pages(ctx context.Context, f func(*Roles) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "directory.roles.patch":
+
+type RolesPatchCall struct {
+	s          *Service
+	customer   string
+	roleId     string
+	role       *Role
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Patch: Patches a role.
+//
+// - customer: Immutable ID of the Google Workspace account.
+// - roleId: Immutable ID of the role.
+func (r *RolesService) Patch(customer string, roleId string, role *Role) *RolesPatchCall {
+	c := &RolesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.roleId = roleId
+	c.role = role
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RolesPatchCall) Fields(s ...googleapi.Field) *RolesPatchCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RolesPatchCall) Context(ctx context.Context) *RolesPatchCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RolesPatchCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RolesPatchCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.role)
+	if err != nil {
+		return nil, err
+	}
+	reqHeaders.Set("Content-Type", "application/json")
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles/{roleId}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("PATCH", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customer": c.customer,
+		"roleId":   c.roleId,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.roles.patch" call.
+// Exactly one of *Role or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Role.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *RolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Role{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Patches a role.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "httpMethod": "PATCH",
+	//   "id": "directory.roles.patch",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "roleId"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "roleId": {
+	//       "description": "Immutable ID of the role.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "request": {
+	//     "$ref": "Role"
+	//   },
+	//   "response": {
+	//     "$ref": "Role"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement"
+	//   ]
+	// }
+
+}
+
+// method id "directory.roles.update":
+
+type RolesUpdateCall struct {
+	s          *Service
+	customer   string
+	roleId     string
+	role       *Role
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Update: Updates a role.
+//
+// - customer: Immutable ID of the Google Workspace account.
+// - roleId: Immutable ID of the role.
+func (r *RolesService) Update(customer string, roleId string, role *Role) *RolesUpdateCall {
+	c := &RolesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customer = customer
+	c.roleId = roleId
+	c.role = role
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RolesUpdateCall) Fields(s ...googleapi.Field) *RolesUpdateCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RolesUpdateCall) Context(ctx context.Context) *RolesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RolesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RolesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customer}/roles/{roleId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customer": c.customer, + "roleId": c.roleId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.roles.update" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RolesUpdateCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
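+	// Editorial note, not generated output: Update issues a PUT that
+	// replaces the whole role, while the Patch call above sends a PATCH
+	// with only the fields that are set. A read-modify-write sketch, with
+	// srv and roleId assumed as in the earlier notes:
+	//
+	//	role, err := srv.Roles.Get("my_customer", roleId).Do()
+	//	if err == nil {
+	//		role.RoleDescription = "updated description"
+	//		role, err = srv.Roles.Update("my_customer", roleId, role).Do()
+	//	}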
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Role{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Updates a role.",
+	//   "flatPath": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "httpMethod": "PUT",
+	//   "id": "directory.roles.update",
+	//   "parameterOrder": [
+	//     "customer",
+	//     "roleId"
+	//   ],
+	//   "parameters": {
+	//     "customer": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "roleId": {
+	//       "description": "Immutable ID of the role.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customer}/roles/{roleId}",
+	//   "request": {
+	//     "$ref": "Role"
+	//   },
+	//   "response": {
+	//     "$ref": "Role"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.rolemanagement"
+	//   ]
+	// }
+
+}
+
+// method id "directory.schemas.delete":
+
+type SchemasDeleteCall struct {
+	s          *Service
+	customerId string
+	schemaKey  string
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Delete: Deletes a schema.
+//
+// - customerId: Immutable ID of the Google Workspace account.
+// - schemaKey: Name or immutable ID of the schema.
+func (r *SchemasService) Delete(customerId string, schemaKey string) *SchemasDeleteCall {
+	c := &SchemasDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customerId = customerId
+	c.schemaKey = schemaKey
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SchemasDeleteCall) Fields(s ...googleapi.Field) *SchemasDeleteCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SchemasDeleteCall) Context(ctx context.Context) *SchemasDeleteCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *SchemasDeleteCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *SchemasDeleteCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("DELETE", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"customerId": c.customerId,
+		"schemaKey":  c.schemaKey,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.schemas.delete" call.
+func (c *SchemasDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Deletes a schema.",
+	//   "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}",
+	//   "httpMethod": "DELETE",
+	//   "id": "directory.schemas.delete",
+	//   "parameterOrder": [
+	//     "customerId",
+	//     "schemaKey"
+	//   ],
+	//   "parameters": {
+	//     "customerId": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "schemaKey": {
+	//       "description": "Name or immutable ID of the schema.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.userschema"
+	//   ]
+	// }
+
+}
+
+// method id "directory.schemas.get":
+
+type SchemasGetCall struct {
+	s            *Service
+	customerId   string
+	schemaKey    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Retrieves a schema.
+//
+// - customerId: The unique ID for the customer's Google Workspace
+//   account. In case of a multi-domain account, to fetch all groups for
+//   a customer, use this field instead of `domain`. You can also use
+//   the `my_customer` alias to represent your account's `customerId`.
+//   The `customerId` is also returned as part of the Users
+//   (/admin-sdk/directory/v1/reference/users) resource. You must
+//   provide either the `customer` or the `domain` parameter.
+// - schemaKey: Name or immutable ID of the schema.
+func (r *SchemasService) Get(customerId string, schemaKey string) *SchemasGetCall {
+	c := &SchemasGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customerId = customerId
+	c.schemaKey = schemaKey
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SchemasGetCall) Fields(s ...googleapi.Field) *SchemasGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SchemasGetCall) IfNoneMatch(entityTag string) *SchemasGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SchemasGetCall) Context(ctx context.Context) *SchemasGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SchemasGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SchemasGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "schemaKey": c.schemaKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.schemas.get" call. +// Exactly one of *Schema or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Schema.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SchemasGetCall) Do(opts ...googleapi.CallOption) (*Schema, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Schema{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a schema.", + // "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "httpMethod": "GET", + // "id": "directory.schemas.get", + // "parameterOrder": [ + // "customerId", + // "schemaKey" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "schemaKey": { + // "description": "Name or immutable ID of the schema.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "response": { + // "$ref": "Schema" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.userschema", + // "https://www.googleapis.com/auth/admin.directory.userschema.readonly" + // ] + // } + +} + +// method id "directory.schemas.insert": + +type SchemasInsertCall struct { + s *Service + customerId string + schema *Schema + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a schema. +// +// - customerId: Immutable ID of the Google Workspace account. +func (r *SchemasService) Insert(customerId string, schema *Schema) *SchemasInsertCall { + c := &SchemasInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.schema = schema + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SchemasInsertCall) Fields(s ...googleapi.Field) *SchemasInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SchemasInsertCall) Context(ctx context.Context) *SchemasInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SchemasInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SchemasInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.schemas.insert" call. +// Exactly one of *Schema or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Schema.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SchemasInsertCall) Do(opts ...googleapi.CallOption) (*Schema, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
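+	// Editorial note, not generated output: a hedged sketch of creating a
+	// custom user schema with this call. The schema and field names are
+	// invented for the example; srv is assumed to be an *admin.Service.
+	//
+	//	schema := &admin.Schema{
+	//		SchemaName: "employmentData",
+	//		Fields: []*admin.SchemaFieldSpec{
+	//			{FieldName: "costCenter", FieldType: "STRING"},
+	//		},
+	//	}
+	//	created, err := srv.Schemas.Insert("my_customer", schema).Do()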
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Schema{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a schema.",
+	//   "flatPath": "admin/directory/v1/customer/{customerId}/schemas",
+	//   "httpMethod": "POST",
+	//   "id": "directory.schemas.insert",
+	//   "parameterOrder": [
+	//     "customerId"
+	//   ],
+	//   "parameters": {
+	//     "customerId": {
+	//       "description": "Immutable ID of the Google Workspace account.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/customer/{customerId}/schemas",
+	//   "request": {
+	//     "$ref": "Schema"
+	//   },
+	//   "response": {
+	//     "$ref": "Schema"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.userschema"
+	//   ]
+	// }
+
+}
+
+// method id "directory.schemas.list":
+
+type SchemasListCall struct {
+	s            *Service
+	customerId   string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Retrieves all schemas for a customer.
+//
+// - customerId: The unique ID for the customer's Google Workspace
+//   account. In case of a multi-domain account, to fetch all groups for
+//   a customer, use this field instead of `domain`. You can also use
+//   the `my_customer` alias to represent your account's `customerId`.
+//   The `customerId` is also returned as part of the Users
+//   (/admin-sdk/directory/v1/reference/users) resource. You must
+//   provide either the `customer` or the `domain` parameter.
+func (r *SchemasService) List(customerId string) *SchemasListCall {
+	c := &SchemasListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.customerId = customerId
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SchemasListCall) Fields(s ...googleapi.Field) *SchemasListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SchemasListCall) IfNoneMatch(entityTag string) *SchemasListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SchemasListCall) Context(ctx context.Context) *SchemasListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *SchemasListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SchemasListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.schemas.list" call. +// Exactly one of *Schemas or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Schemas.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SchemasListCall) Do(opts ...googleapi.CallOption) (*Schemas, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Schemas{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves all schemas for a customer.", + // "flatPath": "admin/directory/v1/customer/{customerId}/schemas", + // "httpMethod": "GET", + // "id": "directory.schemas.list", + // "parameterOrder": [ + // "customerId" + // ], + // "parameters": { + // "customerId": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. 
You must provide either the `customer` or the `domain` parameter.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/schemas", + // "response": { + // "$ref": "Schemas" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.userschema", + // "https://www.googleapis.com/auth/admin.directory.userschema.readonly" + // ] + // } + +} + +// method id "directory.schemas.patch": + +type SchemasPatchCall struct { + s *Service + customerId string + schemaKey string + schema *Schema + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a schema. +// +// - customerId: Immutable ID of the Google Workspace account. +// - schemaKey: Name or immutable ID of the schema. +func (r *SchemasService) Patch(customerId string, schemaKey string, schema *Schema) *SchemasPatchCall { + c := &SchemasPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.schemaKey = schemaKey + c.schema = schema + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SchemasPatchCall) Fields(s ...googleapi.Field) *SchemasPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SchemasPatchCall) Context(ctx context.Context) *SchemasPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SchemasPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SchemasPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "schemaKey": c.schemaKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.schemas.patch" call. +// Exactly one of *Schema or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Schema.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SchemasPatchCall) Do(opts ...googleapi.CallOption) (*Schema, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
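+	// Editorial note (not part of the generated code): SetOptions above
+	// folds any googleapi.CallOption values (for example
+	// googleapi.QuotaUser) into the call's URL parameters before
+	// doRequest builds the final request URL.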
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Schema{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a schema.", + // "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "httpMethod": "PATCH", + // "id": "directory.schemas.patch", + // "parameterOrder": [ + // "customerId", + // "schemaKey" + // ], + // "parameters": { + // "customerId": { + // "description": "Immutable ID of the Google Workspace account.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "schemaKey": { + // "description": "Name or immutable ID of the schema.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "request": { + // "$ref": "Schema" + // }, + // "response": { + // "$ref": "Schema" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.userschema" + // ] + // } + +} + +// method id "directory.schemas.update": + +type SchemasUpdateCall struct { + s *Service + customerId string + schemaKey string + schema *Schema + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a schema. +// +// - customerId: Immutable ID of the Google Workspace account. +// - schemaKey: Name or immutable ID of the schema. +func (r *SchemasService) Update(customerId string, schemaKey string, schema *Schema) *SchemasUpdateCall { + c := &SchemasUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.customerId = customerId + c.schemaKey = schemaKey + c.schema = schema + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SchemasUpdateCall) Fields(s ...googleapi.Field) *SchemasUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SchemasUpdateCall) Context(ctx context.Context) *SchemasUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SchemasUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SchemasUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.schema) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "customerId": c.customerId, + "schemaKey": c.schemaKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.schemas.update" call. +// Exactly one of *Schema or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Schema.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SchemasUpdateCall) Do(opts ...googleapi.CallOption) (*Schema, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Schema{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a schema.", + // "flatPath": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "httpMethod": "PUT", + // "id": "directory.schemas.update", + // "parameterOrder": [ + // "customerId", + // "schemaKey" + // ], + // "parameters": { + // "customerId": { + // "description": "Immutable ID of the Google Workspace account.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "schemaKey": { + // "description": "Name or immutable ID of the schema.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/customer/{customerId}/schemas/{schemaKey}", + // "request": { + // "$ref": "Schema" + // }, + // "response": { + // "$ref": "Schema" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.userschema" + // ] + // } + +} + +// method id "directory.tokens.delete": + +type TokensDeleteCall struct { + s *Service + userKey string + clientId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes all access tokens issued by a user for an +// application. 
+// +// - clientId: The Client ID of the application the token is issued to. +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *TokensService) Delete(userKey string, clientId string) *TokensDeleteCall { + c := &TokensDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.clientId = clientId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TokensDeleteCall) Fields(s ...googleapi.Field) *TokensDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TokensDeleteCall) Context(ctx context.Context) *TokensDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TokensDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TokensDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/tokens/{clientId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + "clientId": c.clientId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.tokens.delete" call. +func (c *TokensDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes all access tokens issued by a user for an application.", + // "flatPath": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + // "httpMethod": "DELETE", + // "id": "directory.tokens.delete", + // "parameterOrder": [ + // "userKey", + // "clientId" + // ], + // "parameters": { + // "clientId": { + // "description": "The Client ID of the application the token is issued to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.tokens.get": + +type TokensGetCall struct { + s *Service + userKey string + clientId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets information about an access token issued by a user. +// +// - clientId: The Client ID of the application the token is issued to. +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *TokensService) Get(userKey string, clientId string) *TokensGetCall { + c := &TokensGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.clientId = clientId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TokensGetCall) Fields(s ...googleapi.Field) *TokensGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TokensGetCall) IfNoneMatch(entityTag string) *TokensGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TokensGetCall) Context(ctx context.Context) *TokensGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TokensGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TokensGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/tokens/{clientId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + "clientId": c.clientId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.tokens.get" call. +// Exactly one of *Token or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Token.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *TokensGetCall) Do(opts ...googleapi.CallOption) (*Token, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Token{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets information about an access token issued by a user.", + // "flatPath": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + // "httpMethod": "GET", + // "id": "directory.tokens.get", + // "parameterOrder": [ + // "userKey", + // "clientId" + // ], + // "parameters": { + // "clientId": { + // "description": "The Client ID of the application the token is issued to.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/tokens/{clientId}", + // "response": { + // "$ref": "Token" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.tokens.list": + +type TokensListCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the set of tokens the specified user has issued to 3rd +// party applications. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *TokensService) List(userKey string) *TokensListCall { + c := &TokensListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TokensListCall) Fields(s ...googleapi.Field) *TokensListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TokensListCall) IfNoneMatch(entityTag string) *TokensListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
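+//
+// Illustrative sketch (editorial addition, not generated code), assuming an
+// authenticated *Service named srv:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	tokens, err := srv.Tokens.List("user@example.com").Context(ctx).Do()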
+func (c *TokensListCall) Context(ctx context.Context) *TokensListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TokensListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TokensListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/tokens") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.tokens.list" call. +// Exactly one of *Tokens or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Tokens.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *TokensListCall) Do(opts ...googleapi.CallOption) (*Tokens, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Tokens{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the set of tokens specified user has issued to 3rd party applications.", + // "flatPath": "admin/directory/v1/users/{userKey}/tokens", + // "httpMethod": "GET", + // "id": "directory.tokens.list", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/tokens", + // "response": { + // "$ref": "Tokens" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.twoStepVerification.turnOff": + +type TwoStepVerificationTurnOffCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TurnOff: Turns off 2-Step Verification for user. +// +// - userKey: Identifies the user in the API request. 
The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *TwoStepVerificationService) TurnOff(userKey string) *TwoStepVerificationTurnOffCall { + c := &TwoStepVerificationTurnOffCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TwoStepVerificationTurnOffCall) Fields(s ...googleapi.Field) *TwoStepVerificationTurnOffCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TwoStepVerificationTurnOffCall) Context(ctx context.Context) *TwoStepVerificationTurnOffCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TwoStepVerificationTurnOffCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TwoStepVerificationTurnOffCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/twoStepVerification/turnOff") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.twoStepVerification.turnOff" call. +func (c *TwoStepVerificationTurnOffCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Turns off 2-Step Verification for user.", + // "flatPath": "admin/directory/v1/users/{userKey}/twoStepVerification/turnOff", + // "httpMethod": "POST", + // "id": "directory.twoStepVerification.turnOff", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/twoStepVerification/turnOff", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.users.delete": + +type UsersDeleteCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a user. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. 
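+//
+// Illustrative sketch (editorial addition, not generated code), assuming an
+// authenticated *Service named srv:
+//
+//	if err := srv.Users.Delete("user@example.com").Do(); err != nil {
+//		// err may be a *googleapi.Error carrying the HTTP status and body
+//	}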
+func (r *UsersService) Delete(userKey string) *UsersDeleteCall { + c := &UsersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersDeleteCall) Fields(s ...googleapi.Field) *UsersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersDeleteCall) Context(ctx context.Context) *UsersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.delete" call. +func (c *UsersDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes a user.", + // "flatPath": "admin/directory/v1/users/{userKey}", + // "httpMethod": "DELETE", + // "id": "directory.users.delete", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.get": + +type UsersGetCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves a user. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersService) Get(userKey string) *UsersGetCall { + c := &UsersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// CustomFieldMask sets the optional parameter "customFieldMask": A +// comma-separated list of schema names. All fields from these schemas +// are fetched. This should only be set when `projection=custom`. 
+func (c *UsersGetCall) CustomFieldMask(customFieldMask string) *UsersGetCall { + c.urlParams_.Set("customFieldMask", customFieldMask) + return c +} + +// Projection sets the optional parameter "projection": What subset of +// fields to fetch for this user. +// +// Possible values: +// +// "basic" (default) - Do not include any custom fields for the user. +// "custom" - Include custom fields from schemas requested in +// +// `customFieldMask`. +// +// "full" - Include all fields associated with this user. +func (c *UsersGetCall) Projection(projection string) *UsersGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ViewType sets the optional parameter "viewType": Whether to fetch the +// administrator-only or domain-wide public view of the user. For more +// information, see Retrieve a user as a non-administrator +// (/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin) +// . +// +// Possible values: +// +// "admin_view" (default) - Results include both administrator-only +// +// and domain-public fields for the user. +// +// "domain_public" - Results only include fields for the user that are +// +// publicly visible to other users in the domain. +func (c *UsersGetCall) ViewType(viewType string) *UsersGetCall { + c.urlParams_.Set("viewType", viewType) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersGetCall) Fields(s ...googleapi.Field) *UsersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *UsersGetCall) IfNoneMatch(entityTag string) *UsersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersGetCall) Context(ctx context.Context) *UsersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.get" call. +// Exactly one of *User or error will be non-nil.
Any non-2xx status +// code is an error. Response headers are in either +// *User.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *UsersGetCall) Do(opts ...googleapi.CallOption) (*User, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &User{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a user.", + // "flatPath": "admin/directory/v1/users/{userKey}", + // "httpMethod": "GET", + // "id": "directory.users.get", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "customFieldMask": { + // "description": "A comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when `projection=custom`.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "default": "basic", + // "description": "What subset of fields to fetch for this user.", + // "enum": [ + // "basic", + // "custom", + // "full" + // ], + // "enumDescriptions": [ + // "Do not include any custom fields for the user.", + // "Include custom fields from schemas requested in `customFieldMask`.", + // "Include all fields associated with this user." + // ], + // "location": "query", + // "type": "string" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "viewType": { + // "default": "admin_view", + // "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + // "enum": [ + // "admin_view", + // "domain_public" + // ], + // "enumDescriptions": [ + // "Results include both administrator-only and domain-public fields for the user.", + // "Results only include fields for the user that are publicly visible to other users in the domain." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}", + // "response": { + // "$ref": "User" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.readonly" + // ] + // } + +} + +// method id "directory.users.insert": + +type UsersInsertCall struct { + s *Service + user *User + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a user. Mutate calls immediately following user +// creation might sometimes fail as the user isn't fully created due to +// propagation delay in our backends. 
Check the error details for the +// "User creation is not complete" message to see if this is the case. +// Retrying the calls after some time can help in this case. +func (r *UsersService) Insert(user *User) *UsersInsertCall { + c := &UsersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.user = user + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersInsertCall) Fields(s ...googleapi.Field) *UsersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersInsertCall) Context(ctx context.Context) *UsersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.insert" call. +// Exactly one of *User or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *User.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *UsersInsertCall) Do(opts ...googleapi.CallOption) (*User, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &User{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a user. Mutate calls immediately following user creation might sometimes fail as the user isn't fully created due to propagation delay in our backends. Check the error details for the \"User creation is not complete\" message to see if this is the case. 
Retrying the calls after some time can help in this case.", + // "flatPath": "admin/directory/v1/users", + // "httpMethod": "POST", + // "id": "directory.users.insert", + // "parameterOrder": [], + // "parameters": {}, + // "path": "admin/directory/v1/users", + // "request": { + // "$ref": "User" + // }, + // "response": { + // "$ref": "User" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.list": + +type UsersListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a paginated list of either deleted users or all users +// in a domain. +func (r *UsersService) List() *UsersListCall { + c := &UsersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// CustomFieldMask sets the optional parameter "customFieldMask": A +// comma-separated list of schema names. All fields from these schemas +// are fetched. This should only be set when `projection=custom`. +func (c *UsersListCall) CustomFieldMask(customFieldMask string) *UsersListCall { + c.urlParams_.Set("customFieldMask", customFieldMask) + return c +} + +// Customer sets the optional parameter "customer": The unique ID for +// the customer's Google Workspace account. In case of a multi-domain +// account, to fetch all groups for a customer, use this field instead +// of `domain`. You can also use the `my_customer` alias to represent +// your account's `customerId`. The `customerId` is also returned as +// part of the Users (/admin-sdk/directory/v1/reference/users) resource. +// You must provide either the `customer` or the `domain` parameter. +func (c *UsersListCall) Customer(customer string) *UsersListCall { + c.urlParams_.Set("customer", customer) + return c +} + +// Domain sets the optional parameter "domain": The domain name. Use +// this field to get groups from only one domain. To return all domains +// for a customer account, use the `customer` query parameter instead. +// Either the `customer` or the `domain` parameter must be provided. +func (c *UsersListCall) Domain(domain string) *UsersListCall { + c.urlParams_.Set("domain", domain) + return c +} + +// Event sets the optional parameter "event": Event on which +// subscription is intended (if subscribing) +// +// Possible values: +// +// "add" - User Created Event +// "delete" - User Deleted Event +// "makeAdmin" - User Admin Status Change Event +// "undelete" - User Undeleted Event +// "update" - User Updated Event +func (c *UsersListCall) Event(event string) *UsersListCall { + c.urlParams_.Set("event", event) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *UsersListCall) MaxResults(maxResults int64) *UsersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Property to use for +// sorting results. +// +// Possible values: +// +// "email" - Primary email of the user. +// "familyName" - User's family name. +// "givenName" - User's given name. 
+func (c *UsersListCall) OrderBy(orderBy string) *UsersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// next page in the list +func (c *UsersListCall) PageToken(pageToken string) *UsersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Projection sets the optional parameter "projection": What subset of +// fields to fetch for this user. +// +// Possible values: +// +// "basic" (default) - Do not include any custom fields for the user. +// "custom" - Include custom fields from schemas requested in +// +// `customFieldMask`. +// +// "full" - Include all fields associated with this user. +func (c *UsersListCall) Projection(projection string) *UsersListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Query sets the optional parameter "query": Query string for searching +// user fields. For more information on constructing user queries, see +// Search for Users (/admin-sdk/directory/v1/guides/search-users). +func (c *UsersListCall) Query(query string) *UsersListCall { + c.urlParams_.Set("query", query) + return c +} + +// ShowDeleted sets the optional parameter "showDeleted": If set to +// `true`, retrieves the list of deleted users. (Default: `false`) +func (c *UsersListCall) ShowDeleted(showDeleted string) *UsersListCall { + c.urlParams_.Set("showDeleted", showDeleted) + return c +} + +// SortOrder sets the optional parameter "sortOrder": Whether to return +// results in ascending or descending order, ignoring case. +// +// Possible values: +// +// "ASCENDING" - Ascending order. +// "DESCENDING" - Descending order. +func (c *UsersListCall) SortOrder(sortOrder string) *UsersListCall { + c.urlParams_.Set("sortOrder", sortOrder) + return c +} + +// ViewType sets the optional parameter "viewType": Whether to fetch the +// administrator-only or domain-wide public view of the user. For more +// information, see Retrieve a user as a non-administrator +// (/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin) +// . +// +// Possible values: +// +// "admin_view" (default) - Results include both administrator-only +// +// and domain-public fields for the user. +// +// "domain_public" - Results only include fields for the user that are +// +// publicly visible to other users in the domain. +func (c *UsersListCall) ViewType(viewType string) *UsersListCall { + c.urlParams_.Set("viewType", viewType) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersListCall) Fields(s ...googleapi.Field) *UsersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *UsersListCall) IfNoneMatch(entityTag string) *UsersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
+func (c *UsersListCall) Context(ctx context.Context) *UsersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.list" call. +// Exactly one of *Users or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Users.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UsersListCall) Do(opts ...googleapi.CallOption) (*Users, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Users{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a paginated list of either deleted users or all users in a domain.", + // "flatPath": "admin/directory/v1/users", + // "httpMethod": "GET", + // "id": "directory.users.list", + // "parameterOrder": [], + // "parameters": { + // "customFieldMask": { + // "description": "A comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when `projection=custom`.", + // "location": "query", + // "type": "string" + // }, + // "customer": { + // "description": "The unique ID for the customer's Google Workspace account. In case of a multi-domain account, to fetch all groups for a customer, use this field instead of `domain`. You can also use the `my_customer` alias to represent your account's `customerId`. The `customerId` is also returned as part of the [Users](/admin-sdk/directory/v1/reference/users) resource. You must provide either the `customer` or the `domain` parameter.", + // "location": "query", + // "type": "string" + // }, + // "domain": { + // "description": "The domain name. Use this field to get groups from only one domain. To return all domains for a customer account, use the `customer` query parameter instead. 
Either the `customer` or the `domain` parameter must be provided.", + // "location": "query", + // "type": "string" + // }, + // "event": { + // "description": "Event on which subscription is intended (if subscribing)", + // "enum": [ + // "add", + // "delete", + // "makeAdmin", + // "undelete", + // "update" + // ], + // "enumDescriptions": [ + // "User Created Event", + // "User Deleted Event", + // "User Admin Status Change Event", + // "User Undeleted Event", + // "User Updated Event" + // ], + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "100", + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Property to use for sorting results.", + // "enum": [ + // "email", + // "familyName", + // "givenName" + // ], + // "enumDescriptions": [ + // "Primary email of the user.", + // "User's family name.", + // "User's given name." + // ], + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Token to specify next page in the list", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "default": "basic", + // "description": "What subset of fields to fetch for this user.", + // "enum": [ + // "basic", + // "custom", + // "full" + // ], + // "enumDescriptions": [ + // "Do not include any custom fields for the user.", + // "Include custom fields from schemas requested in `customFieldMask`.", + // "Include all fields associated with this user." + // ], + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "Query string for searching user fields. For more information on constructing user queries, see [Search for Users](/admin-sdk/directory/v1/guides/search-users).", + // "location": "query", + // "type": "string" + // }, + // "showDeleted": { + // "description": "If set to `true`, retrieves the list of deleted users. (Default: `false`)", + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Whether to return results in ascending or descending order, ignoring case.", + // "enum": [ + // "ASCENDING", + // "DESCENDING" + // ], + // "enumDescriptions": [ + // "Ascending order.", + // "Descending order." + // ], + // "location": "query", + // "type": "string" + // }, + // "viewType": { + // "default": "admin_view", + // "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + // "enum": [ + // "admin_view", + // "domain_public" + // ], + // "enumDescriptions": [ + // "Results include both administrator-only and domain-public fields for the user.", + // "Results only include fields for the user that are publicly visible to other users in the domain." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users", + // "response": { + // "$ref": "Users" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.readonly", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *UsersListCall) Pages(ctx context.Context, f func(*Users) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "directory.users.makeAdmin": + +type UsersMakeAdminCall struct { + s *Service + userKey string + usermakeadmin *UserMakeAdmin + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// MakeAdmin: Makes a user a super administrator. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersService) MakeAdmin(userKey string, usermakeadmin *UserMakeAdmin) *UsersMakeAdminCall { + c := &UsersMakeAdminCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.usermakeadmin = usermakeadmin + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersMakeAdminCall) Fields(s ...googleapi.Field) *UsersMakeAdminCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersMakeAdminCall) Context(ctx context.Context) *UsersMakeAdminCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersMakeAdminCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersMakeAdminCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usermakeadmin) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/makeAdmin") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.makeAdmin" call. +func (c *UsersMakeAdminCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
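+ // A hedged caller-side sketch ("srv" and "ctx" are assumed
+ // identifiers, and the Status field is taken from the UserMakeAdmin
+ // schema; none of these are defined in this excerpt):
+ //
+ //	err := srv.Users.MakeAdmin("user@example.com", &UserMakeAdmin{Status: true}).Context(ctx).Do()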
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Makes a user a super administrator.", + // "flatPath": "admin/directory/v1/users/{userKey}/makeAdmin", + // "httpMethod": "POST", + // "id": "directory.users.makeAdmin", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/makeAdmin", + // "request": { + // "$ref": "UserMakeAdmin" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.patch": + +type UsersPatchCall struct { + s *Service + userKey string + user *User + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a user using patch semantics. The update method should +// be used instead, because it also supports patch semantics and has +// better performance. If you're mapping an external identity to a +// Google identity, use the `update` +// (https://developers.google.com/admin-sdk/directory/v1/reference/users/update) +// method instead of the `patch` method. This method is unable to clear +// fields that contain repeated objects (`addresses`, `phones`, etc). +// Use the update method instead. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersService) Patch(userKey string, user *User) *UsersPatchCall { + c := &UsersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.user = user + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersPatchCall) Fields(s ...googleapi.Field) *UsersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersPatchCall) Context(ctx context.Context) *UsersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}") + urls += "?" 
+ c.urlParams_.Encode()
+ req, err := http.NewRequest("PATCH", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "userKey": c.userKey,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.users.patch" call.
+// Exactly one of *User or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *User.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *UsersPatchCall) Do(opts ...googleapi.CallOption) (*User, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &User{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a user using patch semantics. The update method should be used instead, because it also supports patch semantics and has better performance. If you're mapping an external identity to a Google identity, use the [`update`](https://developers.google.com/admin-sdk/directory/v1/reference/users/update) method instead of the `patch` method. This method is unable to clear fields that contain repeated objects (`addresses`, `phones`, etc). Use the update method instead.",
+ // "flatPath": "admin/directory/v1/users/{userKey}",
+ // "httpMethod": "PATCH",
+ // "id": "directory.users.patch",
+ // "parameterOrder": [
+ // "userKey"
+ // ],
+ // "parameters": {
+ // "userKey": {
+ // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "admin/directory/v1/users/{userKey}",
+ // "request": {
+ // "$ref": "User"
+ // },
+ // "response": {
+ // "$ref": "User"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/admin.directory.user"
+ // ]
+ // }
+
+}
+
+// method id "directory.users.signOut":
+
+type UsersSignOutCall struct {
+ s *Service
+ userKey string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// SignOut: Signs a user out of all web and device sessions and resets
+// their sign-in cookies. The user will have to sign in by
+// authenticating again.
+//
+// - userKey: Identifies the target user in the API request. The value
+// can be the user's primary email address, alias email address, or
+// unique user ID.
+func (r *UsersService) SignOut(userKey string) *UsersSignOutCall {
+ c := &UsersSignOutCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.userKey = userKey
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UsersSignOutCall) Fields(s ...googleapi.Field) *UsersSignOutCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersSignOutCall) Context(ctx context.Context) *UsersSignOutCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersSignOutCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersSignOutCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/signOut") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.signOut" call. +func (c *UsersSignOutCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Signs a user out of all web and device sessions and reset their sign-in cookies. User will have to sign in by authenticating again.", + // "flatPath": "admin/directory/v1/users/{userKey}/signOut", + // "httpMethod": "POST", + // "id": "directory.users.signOut", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the target user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/signOut", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.users.undelete": + +type UsersUndeleteCall struct { + s *Service + userKey string + userundelete *UserUndelete + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a deleted user. +// +// - userKey: The immutable id of the user. +func (r *UsersService) Undelete(userKey string, userundelete *UserUndelete) *UsersUndeleteCall { + c := &UsersUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.userundelete = userundelete + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersUndeleteCall) Fields(s ...googleapi.Field) *UsersUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersUndeleteCall) Context(ctx context.Context) *UsersUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.userundelete) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/undelete") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.undelete" call. +func (c *UsersUndeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Undeletes a deleted user.", + // "flatPath": "admin/directory/v1/users/{userKey}/undelete", + // "httpMethod": "POST", + // "id": "directory.users.undelete", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "The immutable id of the user", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/undelete", + // "request": { + // "$ref": "UserUndelete" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.update": + +type UsersUpdateCall struct { + s *Service + userKey string + user *User + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a user. This method supports patch semantics, meaning +// that you only need to include the fields you wish to update. Fields +// that are not present in the request will be preserved, and fields set +// to `null` will be cleared. For repeating fields that contain arrays, +// individual items in the array can't be patched piecemeal; they must +// be supplied in the request body with the desired values for all +// items. See the user accounts guide +// (https://developers.google.com/admin-sdk/directory/v1/guides/manage-users#update_user) +// for more information. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. 
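+//
+// A minimal usage sketch (hedged: "srv" and "ctx" are assumed
+// identifiers, and the OrgUnitPath field is taken from the User
+// schema; none of these are defined in this excerpt):
+//
+//	updated, err := srv.Users.Update("user@example.com", &User{OrgUnitPath: "/Engineering"}).Context(ctx).Do()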
+func (r *UsersService) Update(userKey string, user *User) *UsersUpdateCall { + c := &UsersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.user = user + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersUpdateCall) Fields(s ...googleapi.Field) *UsersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersUpdateCall) Context(ctx context.Context) *UsersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.user) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.update" call. +// Exactly one of *User or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *User.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *UsersUpdateCall) Do(opts ...googleapi.CallOption) (*User, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &User{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a user. This method supports patch semantics, meaning that you only need to include the fields you wish to update. Fields that are not present in the request will be preserved, and fields set to `null` will be cleared. 
For repeating fields that contain arrays, individual items in the array can't be patched piecemeal; they must be supplied in the request body with the desired values for all items. See the [user accounts guide](https://developers.google.com/admin-sdk/directory/v1/guides/manage-users#update_user) for more information.",
+ // "flatPath": "admin/directory/v1/users/{userKey}",
+ // "httpMethod": "PUT",
+ // "id": "directory.users.update",
+ // "parameterOrder": [
+ // "userKey"
+ // ],
+ // "parameters": {
+ // "userKey": {
+ // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "admin/directory/v1/users/{userKey}",
+ // "request": {
+ // "$ref": "User"
+ // },
+ // "response": {
+ // "$ref": "User"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/admin.directory.user"
+ // ]
+ // }
+
+}
+
+// method id "directory.users.watch":
+
+type UsersWatchCall struct {
+ s *Service
+ channel *Channel
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Watch: Watches for changes in users list.
+func (r *UsersService) Watch(channel *Channel) *UsersWatchCall {
+ c := &UsersWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.channel = channel
+ return c
+}
+
+// CustomFieldMask sets the optional parameter "customFieldMask":
+// Comma-separated list of schema names. All fields from these schemas
+// are fetched. This should only be set when projection=custom.
+func (c *UsersWatchCall) CustomFieldMask(customFieldMask string) *UsersWatchCall {
+ c.urlParams_.Set("customFieldMask", customFieldMask)
+ return c
+}
+
+// Customer sets the optional parameter "customer": Immutable ID of the
+// Google Workspace account. In case of multi-domain, to fetch all users
+// for a customer, fill this field instead of domain.
+func (c *UsersWatchCall) Customer(customer string) *UsersWatchCall {
+ c.urlParams_.Set("customer", customer)
+ return c
+}
+
+// Domain sets the optional parameter "domain": Name of the domain. Fill
+// this field to get users from only this domain. To return all users in
+// a multi-domain account, fill the customer field instead.
+func (c *UsersWatchCall) Domain(domain string) *UsersWatchCall {
+ c.urlParams_.Set("domain", domain)
+ return c
+}
+
+// Event sets the optional parameter "event": Events to watch for.
+//
+// Possible values:
+//
+// "add" - User Created Event
+// "delete" - User Deleted Event
+// "makeAdmin" - User Admin Status Change Event
+// "undelete" - User Undeleted Event
+// "update" - User Updated Event
+func (c *UsersWatchCall) Event(event string) *UsersWatchCall {
+ c.urlParams_.Set("event", event)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return.
+func (c *UsersWatchCall) MaxResults(maxResults int64) *UsersWatchCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// OrderBy sets the optional parameter "orderBy": Column to use for
+// sorting results.
+//
+// Possible values:
+//
+// "email" - Primary email of the user.
+// "familyName" - User's family name.
+// "givenName" - User's given name.
+func (c *UsersWatchCall) OrderBy(orderBy string) *UsersWatchCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Token to specify
+// the next page in the list.
+func (c *UsersWatchCall) PageToken(pageToken string) *UsersWatchCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Projection sets the optional parameter "projection": What subset of
+// fields to fetch for this user.
+//
+// Possible values:
+//
+// "basic" (default) - Do not include any custom fields for the user.
+// "custom" - Include custom fields from schemas mentioned in
+// customFieldMask.
+// "full" - Include all fields associated with this user.
+func (c *UsersWatchCall) Projection(projection string) *UsersWatchCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Query sets the optional parameter "query": Query string search.
+// Should be of the form "". Complete documentation is at
+// https://developers.google.com/admin-sdk/directory/v1/guides/search-users
+func (c *UsersWatchCall) Query(query string) *UsersWatchCall {
+ c.urlParams_.Set("query", query)
+ return c
+}
+
+// ShowDeleted sets the optional parameter "showDeleted": If set to
+// true, retrieves the list of deleted users. (Default: false)
+func (c *UsersWatchCall) ShowDeleted(showDeleted string) *UsersWatchCall {
+ c.urlParams_.Set("showDeleted", showDeleted)
+ return c
+}
+
+// SortOrder sets the optional parameter "sortOrder": Whether to return
+// results in ascending or descending order.
+//
+// Possible values:
+//
+// "ASCENDING" - Ascending order.
+// "DESCENDING" - Descending order.
+func (c *UsersWatchCall) SortOrder(sortOrder string) *UsersWatchCall {
+ c.urlParams_.Set("sortOrder", sortOrder)
+ return c
+}
+
+// ViewType sets the optional parameter "viewType": Whether to fetch the
+// administrator-only or domain-wide public view of the user. For more
+// information, see Retrieve a user as a non-administrator
+// (/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).
+//
+// Possible values:
+//
+// "admin_view" (default) - Results include both administrator-only
+// and domain-public fields.
+// "domain_public" - Results only include fields for the user that are
+// publicly visible to other users in the domain.
+func (c *UsersWatchCall) ViewType(viewType string) *UsersWatchCall {
+ c.urlParams_.Set("viewType", viewType)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UsersWatchCall) Fields(s ...googleapi.Field) *UsersWatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *UsersWatchCall) Context(ctx context.Context) *UsersWatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
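+// For example, to send an extra header with a watch call (a hedged
+// sketch; "call" stands for any *UsersWatchCall value and the header
+// name is illustrative):
+//
+//	call.Header().Set("X-Debug-Tag", "user-watch")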
+func (c *UsersWatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersWatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/watch") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UsersWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watches for changes in users list.", + // "flatPath": "admin/directory/v1/users/watch", + // "httpMethod": "POST", + // "id": "directory.users.watch", + // "parameterOrder": [], + // "parameters": { + // "customFieldMask": { + // "description": "Comma-separated list of schema names. All fields from these schemas are fetched. This should only be set when projection=custom.", + // "location": "query", + // "type": "string" + // }, + // "customer": { + // "description": "Immutable ID of the Google Workspace account. In case of multi-domain, to fetch all users for a customer, fill this field instead of domain.", + // "location": "query", + // "type": "string" + // }, + // "domain": { + // "description": "Name of the domain. Fill this field to get users from only this domain. 
To return all users in a multi-domain fill customer field instead.\"", + // "location": "query", + // "type": "string" + // }, + // "event": { + // "description": "Events to watch for.", + // "enum": [ + // "add", + // "delete", + // "makeAdmin", + // "undelete", + // "update" + // ], + // "enumDescriptions": [ + // "User Created Event", + // "User Deleted Event", + // "User Admin Status Change Event", + // "User Undeleted Event", + // "User Updated Event" + // ], + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "100", + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Column to use for sorting results", + // "enum": [ + // "email", + // "familyName", + // "givenName" + // ], + // "enumDescriptions": [ + // "Primary email of the user.", + // "User's family name.", + // "User's given name." + // ], + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Token to specify next page in the list", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "default": "basic", + // "description": "What subset of fields to fetch for this user.", + // "enum": [ + // "basic", + // "custom", + // "full" + // ], + // "enumDescriptions": [ + // "Do not include any custom fields for the user.", + // "Include custom fields from schemas mentioned in customFieldMask.", + // "Include all fields associated with this user." + // ], + // "location": "query", + // "type": "string" + // }, + // "query": { + // "description": "Query string search. Should be of the form \"\". Complete documentation is at https: //developers.google.com/admin-sdk/directory/v1/guides/search-users", + // "location": "query", + // "type": "string" + // }, + // "showDeleted": { + // "description": "If set to true, retrieves the list of deleted users. (Default: false)", + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Whether to return results in ascending or descending order.", + // "enum": [ + // "ASCENDING", + // "DESCENDING" + // ], + // "enumDescriptions": [ + // "Ascending order.", + // "Descending order." + // ], + // "location": "query", + // "type": "string" + // }, + // "viewType": { + // "default": "admin_view", + // "description": "Whether to fetch the administrator-only or domain-wide public view of the user. For more information, see [Retrieve a user as a non-administrator](/admin-sdk/directory/v1/guides/manage-users#retrieve_users_non_admin).", + // "enum": [ + // "admin_view", + // "domain_public" + // ], + // "enumDescriptions": [ + // "Results include both administrator-only and domain-public fields.", + // "Results only include fields for the user that are publicly visible to other users in the domain." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/watch", + // "request": { + // "$ref": "Channel" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.readonly", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "directory.users.aliases.delete": + +type UsersAliasesDeleteCall struct { + s *Service + userKey string + alias string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes an alias. +// +// - alias: The alias to be removed. +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersAliasesService) Delete(userKey string, alias string) *UsersAliasesDeleteCall { + c := &UsersAliasesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.alias = alias + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersAliasesDeleteCall) Fields(s ...googleapi.Field) *UsersAliasesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersAliasesDeleteCall) Context(ctx context.Context) *UsersAliasesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersAliasesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersAliasesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/aliases/{alias}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + "alias": c.alias, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.aliases.delete" call. +func (c *UsersAliasesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
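+ // Delete calls carry no response body; Do only reports transport and
+ // API errors. A hedged usage sketch ("srv" and "ctx" are assumed
+ // identifiers, not defined in this excerpt):
+ //
+ //	if err := srv.Users.Aliases.Delete("user@example.com", "alias@example.com").Context(ctx).Do(); err != nil {
+ //		// handle the API error
+ //	}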
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes an alias.", + // "flatPath": "admin/directory/v1/users/{userKey}/aliases/{alias}", + // "httpMethod": "DELETE", + // "id": "directory.users.aliases.delete", + // "parameterOrder": [ + // "userKey", + // "alias" + // ], + // "parameters": { + // "alias": { + // "description": "The alias to be removed.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/aliases/{alias}", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.alias" + // ] + // } + +} + +// method id "directory.users.aliases.insert": + +type UsersAliasesInsertCall struct { + s *Service + userKey string + alias *Alias + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Adds an alias. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersAliasesService) Insert(userKey string, alias *Alias) *UsersAliasesInsertCall { + c := &UsersAliasesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.alias = alias + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersAliasesInsertCall) Fields(s ...googleapi.Field) *UsersAliasesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersAliasesInsertCall) Context(ctx context.Context) *UsersAliasesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersAliasesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersAliasesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.alias) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/aliases") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.aliases.insert" call. +// Exactly one of *Alias or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Alias.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UsersAliasesInsertCall) Do(opts ...googleapi.CallOption) (*Alias, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Alias{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an alias.", + // "flatPath": "admin/directory/v1/users/{userKey}/aliases", + // "httpMethod": "POST", + // "id": "directory.users.aliases.insert", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/aliases", + // "request": { + // "$ref": "Alias" + // }, + // "response": { + // "$ref": "Alias" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.alias" + // ] + // } + +} + +// method id "directory.users.aliases.list": + +type UsersAliasesListCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all aliases for a user. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersAliasesService) List(userKey string) *UsersAliasesListCall { + c := &UsersAliasesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Event sets the optional parameter "event": Events to watch for. +// +// Possible values: +// +// "add" - Alias Created Event +// "delete" - Alias Deleted Event +func (c *UsersAliasesListCall) Event(event string) *UsersAliasesListCall { + c.urlParams_.Set("event", event) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
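+// For example, to request only the alias values (a hedged sketch;
+// "call" and the field mask shown are illustrative, not prescribed by
+// this file):
+//
+//	call.Fields("aliases(alias)")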
+func (c *UsersAliasesListCall) Fields(s ...googleapi.Field) *UsersAliasesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *UsersAliasesListCall) IfNoneMatch(entityTag string) *UsersAliasesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *UsersAliasesListCall) Context(ctx context.Context) *UsersAliasesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *UsersAliasesListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *UsersAliasesListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/aliases")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "userKey": c.userKey,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.users.aliases.list" call.
+// Exactly one of *Aliases or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Aliases.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *UsersAliasesListCall) Do(opts ...googleapi.CallOption) (*Aliases, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
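+ // The branch below turns an HTTP 304 into a *googleapi.Error so that
+ // callers pairing IfNoneMatch with googleapi.IsNotModified can detect
+ // an unchanged resource. A hedged sketch ("srv", "ctx" and "etag" are
+ // assumed identifiers, not defined in this excerpt):
+ //
+ //	aliases, err := srv.Users.Aliases.List("user@example.com").IfNoneMatch(etag).Context(ctx).Do()
+ //	if googleapi.IsNotModified(err) {
+ //		// the cached copy is still current
+ //	}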
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Aliases{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all aliases for a user.", + // "flatPath": "admin/directory/v1/users/{userKey}/aliases", + // "httpMethod": "GET", + // "id": "directory.users.aliases.list", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "event": { + // "description": "Events to watch for.", + // "enum": [ + // "add", + // "delete" + // ], + // "enumDescriptions": [ + // "Alias Created Event", + // "Alias Deleted Event" + // ], + // "location": "query", + // "type": "string" + // }, + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/aliases", + // "response": { + // "$ref": "Aliases" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.alias", + // "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + // "https://www.googleapis.com/auth/admin.directory.user.readonly" + // ] + // } + +} + +// method id "directory.users.aliases.watch": + +type UsersAliasesWatchCall struct { + s *Service + userKey string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Watch: Watches for changes in users list. +// +// - userKey: Email or immutable ID of the user. +func (r *UsersAliasesService) Watch(userKey string, channel *Channel) *UsersAliasesWatchCall { + c := &UsersAliasesWatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.channel = channel + return c +} + +// Event sets the optional parameter "event": Events to watch for. +// +// Possible values: +// +// "add" - Alias Created Event +// "delete" - Alias Deleted Event +func (c *UsersAliasesWatchCall) Event(event string) *UsersAliasesWatchCall { + c.urlParams_.Set("event", event) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersAliasesWatchCall) Fields(s ...googleapi.Field) *UsersAliasesWatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersAliasesWatchCall) Context(ctx context.Context) *UsersAliasesWatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *UsersAliasesWatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersAliasesWatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/aliases/watch") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.aliases.watch" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UsersAliasesWatchCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watches for changes in users list.", + // "flatPath": "admin/directory/v1/users/{userKey}/aliases/watch", + // "httpMethod": "POST", + // "id": "directory.users.aliases.watch", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "event": { + // "description": "Events to watch for.", + // "enum": [ + // "add", + // "delete" + // ], + // "enumDescriptions": [ + // "Alias Created Event", + // "Alias Deleted Event" + // ], + // "location": "query", + // "type": "string" + // }, + // "userKey": { + // "description": "Email or immutable ID of the user", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/aliases/watch", + // "request": { + // "$ref": "Channel" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.alias", + // "https://www.googleapis.com/auth/admin.directory.user.alias.readonly", + // "https://www.googleapis.com/auth/admin.directory.user.readonly" + // ] + // } + +} + +// method id "directory.users.photos.delete": + +type 
UsersPhotosDeleteCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Removes the user's photo. +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersPhotosService) Delete(userKey string) *UsersPhotosDeleteCall { + c := &UsersPhotosDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersPhotosDeleteCall) Fields(s ...googleapi.Field) *UsersPhotosDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersPhotosDeleteCall) Context(ctx context.Context) *UsersPhotosDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersPhotosDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersPhotosDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/photos/thumbnail") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.photos.delete" call. +func (c *UsersPhotosDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Removes the user's photo.", + // "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "httpMethod": "DELETE", + // "id": "directory.users.photos.delete", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.photos.get": + +type UsersPhotosGetCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the user's photo. 
+//
+// - userKey: Identifies the user in the API request. The value can be
+// the user's primary email address, alias email address, or unique
+// user ID.
+func (r *UsersPhotosService) Get(userKey string) *UsersPhotosGetCall {
+ c := &UsersPhotosGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.userKey = userKey
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UsersPhotosGetCall) Fields(s ...googleapi.Field) *UsersPhotosGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *UsersPhotosGetCall) IfNoneMatch(entityTag string) *UsersPhotosGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *UsersPhotosGetCall) Context(ctx context.Context) *UsersPhotosGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *UsersPhotosGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *UsersPhotosGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/photos/thumbnail")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "userKey": c.userKey,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.users.photos.get" call.
+// Exactly one of *UserPhoto or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *UserPhoto.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *UsersPhotosGetCall) Do(opts ...googleapi.CallOption) (*UserPhoto, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
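+ // On success Do decodes the JSON body into a UserPhoto, whose
+ // embedded ServerResponse carries the HTTP status and headers. A
+ // hedged sketch ("srv" and "ctx" are assumed identifiers, and the
+ // MimeType, Width and Height fields are taken from the UserPhoto
+ // schema; none are defined in this excerpt):
+ //
+ //	photo, err := srv.Users.Photos.Get("user@example.com").Context(ctx).Do()
+ //	if err == nil {
+ //		fmt.Println(photo.MimeType, photo.Width, photo.Height)
+ //	}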
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &UserPhoto{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the user's photo.", + // "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "httpMethod": "GET", + // "id": "directory.users.photos.get", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "response": { + // "$ref": "UserPhoto" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user", + // "https://www.googleapis.com/auth/admin.directory.user.readonly" + // ] + // } + +} + +// method id "directory.users.photos.patch": + +type UsersPhotosPatchCall struct { + s *Service + userKey string + userphoto *UserPhoto + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Adds a photo for the user. This method supports patch +// semantics (/admin-sdk/directory/v1/guides/performance#patch). +// +// - userKey: Identifies the user in the API request. The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersPhotosService) Patch(userKey string, userphoto *UserPhoto) *UsersPhotosPatchCall { + c := &UsersPhotosPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.userphoto = userphoto + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersPhotosPatchCall) Fields(s ...googleapi.Field) *UsersPhotosPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersPhotosPatchCall) Context(ctx context.Context) *UsersPhotosPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *UsersPhotosPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersPhotosPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.userphoto) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/photos/thumbnail") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.photos.patch" call. +// Exactly one of *UserPhoto or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *UserPhoto.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersPhotosPatchCall) Do(opts ...googleapi.CallOption) (*UserPhoto, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &UserPhoto{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a photo for the user. This method supports [patch semantics](/admin-sdk/directory/v1/guides/performance#patch).", + // "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "httpMethod": "PATCH", + // "id": "directory.users.photos.patch", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "request": { + // "$ref": "UserPhoto" + // }, + // "response": { + // "$ref": "UserPhoto" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.users.photos.update": + +type UsersPhotosUpdateCall struct { + s *Service + userKey string + userphoto *UserPhoto + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Adds a photo for the user. +// +// - userKey: Identifies the user in the API request. 
The value can be +// the user's primary email address, alias email address, or unique +// user ID. +func (r *UsersPhotosService) Update(userKey string, userphoto *UserPhoto) *UsersPhotosUpdateCall { + c := &UsersPhotosUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + c.userphoto = userphoto + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UsersPhotosUpdateCall) Fields(s ...googleapi.Field) *UsersPhotosUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UsersPhotosUpdateCall) Context(ctx context.Context) *UsersPhotosUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UsersPhotosUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UsersPhotosUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.userphoto) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/photos/thumbnail") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.users.photos.update" call. +// Exactly one of *UserPhoto or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *UserPhoto.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UsersPhotosUpdateCall) Do(opts ...googleapi.CallOption) (*UserPhoto, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &UserPhoto{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a photo for the user.", + // "flatPath": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "httpMethod": "PUT", + // "id": "directory.users.photos.update", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/photos/thumbnail", + // "request": { + // "$ref": "UserPhoto" + // }, + // "response": { + // "$ref": "UserPhoto" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user" + // ] + // } + +} + +// method id "directory.verificationCodes.generate": + +type VerificationCodesGenerateCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Generate: Generates new backup verification codes for the user. +// +// - userKey: Email or immutable ID of the user. +func (r *VerificationCodesService) Generate(userKey string) *VerificationCodesGenerateCall { + c := &VerificationCodesGenerateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VerificationCodesGenerateCall) Fields(s ...googleapi.Field) *VerificationCodesGenerateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VerificationCodesGenerateCall) Context(ctx context.Context) *VerificationCodesGenerateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VerificationCodesGenerateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VerificationCodesGenerateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/verificationCodes/generate") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.verificationCodes.generate" call. +func (c *VerificationCodesGenerateCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Generates new backup verification codes for the user.", + // "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes/generate", + // "httpMethod": "POST", + // "id": "directory.verificationCodes.generate", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Email or immutable ID of the user", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/verificationCodes/generate", + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} + +// method id "directory.verificationCodes.invalidate": + +type VerificationCodesInvalidateCall struct { + s *Service + userKey string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Invalidate: Invalidates the current backup verification codes for the +// user. +// +// - userKey: Email or immutable ID of the user. +func (r *VerificationCodesService) Invalidate(userKey string) *VerificationCodesInvalidateCall { + c := &VerificationCodesInvalidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.userKey = userKey + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VerificationCodesInvalidateCall) Fields(s ...googleapi.Field) *VerificationCodesInvalidateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VerificationCodesInvalidateCall) Context(ctx context.Context) *VerificationCodesInvalidateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VerificationCodesInvalidateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VerificationCodesInvalidateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/verificationCodes/invalidate") + urls += "?" 
+ c.urlParams_.Encode()
+	req, err := http.NewRequest("POST", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"userKey": c.userKey,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "directory.verificationCodes.invalidate" call.
+func (c *VerificationCodesInvalidateCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Invalidates the current backup verification codes for the user.",
+	//   "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes/invalidate",
+	//   "httpMethod": "POST",
+	//   "id": "directory.verificationCodes.invalidate",
+	//   "parameterOrder": [
+	//     "userKey"
+	//   ],
+	//   "parameters": {
+	//     "userKey": {
+	//       "description": "Email or immutable ID of the user",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "admin/directory/v1/users/{userKey}/verificationCodes/invalidate",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/admin.directory.user.security"
+	//   ]
+	// }
+
+}
+
+// method id "directory.verificationCodes.list":
+
+type VerificationCodesListCall struct {
+	s            *Service
+	userKey      string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Returns the current set of valid backup verification codes for
+// the specified user.
+//
+// - userKey: Identifies the user in the API request. The value can be
+//   the user's primary email address, alias email address, or unique
+//   user ID.
+func (r *VerificationCodesService) List(userKey string) *VerificationCodesListCall {
+	c := &VerificationCodesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.userKey = userKey
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *VerificationCodesListCall) Fields(s ...googleapi.Field) *VerificationCodesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *VerificationCodesListCall) IfNoneMatch(entityTag string) *VerificationCodesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *VerificationCodesListCall) Context(ctx context.Context) *VerificationCodesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *VerificationCodesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VerificationCodesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "admin/directory/v1/users/{userKey}/verificationCodes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "userKey": c.userKey, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "directory.verificationCodes.list" call. +// Exactly one of *VerificationCodes or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *VerificationCodes.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VerificationCodesListCall) Do(opts ...googleapi.CallOption) (*VerificationCodes, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &VerificationCodes{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the current set of valid backup verification codes for the specified user.", + // "flatPath": "admin/directory/v1/users/{userKey}/verificationCodes", + // "httpMethod": "GET", + // "id": "directory.verificationCodes.list", + // "parameterOrder": [ + // "userKey" + // ], + // "parameters": { + // "userKey": { + // "description": "Identifies the user in the API request. 
The value can be the user's primary email address, alias email address, or unique user ID.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "admin/directory/v1/users/{userKey}/verificationCodes", + // "response": { + // "$ref": "VerificationCodes" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/admin.directory.user.security" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 693a1b1a..f39dd00d 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -15,6 +15,7 @@ import ( "github.com/google/uuid" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) // Use this error type to return an error which allows introspection of both @@ -43,6 +44,16 @@ func (e wrappedCallErr) Is(target error) bool { // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -77,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 46ad187e..6fdda3b7 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.126.0" +const Version = "0.134.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index edebc73a..62120711 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"34333739363230323936363635393736363430\"", + "etag": "\"39353535313838393033333032363632303533\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1311,7 +1311,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1357,7 +1357,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1399,7 +1399,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1444,7 +1444,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1493,7 +1493,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1545,7 +1545,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1588,7 +1588,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1662,7 +1662,7 @@ ], "parameters": { "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1773,7 +1773,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1843,7 +1843,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1907,7 +1907,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1967,7 +1967,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2054,7 +2054,7 @@ "type": "string" }, "name": { - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "query", "type": "string" }, @@ -2252,7 +2252,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2332,7 +2332,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2443,7 +2443,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2489,7 +2489,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2536,7 +2536,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2612,7 +2612,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -3010,7 +3010,7 @@ } } }, - "revision": "20230301", + "revision": "20230710", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index e11bf2e6..69a6e41e 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -7260,7 +7260,8 @@ type ObjectAccessControlsDeleteCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7375,7 +7376,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7416,7 +7417,8 @@ type ObjectAccessControlsGetCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7569,7 +7571,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7608,7 +7610,8 @@ type ObjectAccessControlsInsertCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7745,7 +7748,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7787,7 +7790,8 @@ type ObjectAccessControlsListCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7931,7 +7935,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7974,7 +7978,8 @@ type ObjectAccessControlsPatchCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8120,7 +8125,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8166,7 +8171,8 @@ type ObjectAccessControlsUpdateCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
+// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8312,7 +8318,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8357,7 +8363,8 @@ type ObjectsComposeCall struct { // objects. The destination object is stored in this bucket. // - destinationObject: Name of the new object. For information about // how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8540,7 +8547,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8625,7 +8632,8 @@ type ObjectsCopyCall struct { // - destinationBucket: Name of the bucket in which to store the new // object. Overrides the provided object metadata's bucket value, if // any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. +// safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. @@ -8633,7 +8641,8 @@ type ObjectsCopyCall struct { // object. // - sourceObject: Name of the source object. For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8894,7 +8903,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "parameters": { // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9005,7 +9014,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9049,7 +9058,8 @@ type ObjectsDeleteCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9215,7 +9225,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9252,7 +9262,8 @@ type ObjectsGetCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9484,7 +9495,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9541,7 +9552,8 @@ type ObjectsGetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. 
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9685,7 +9697,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9797,7 +9809,8 @@ func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. +// URL encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.urlParams_.Set("name", name) return c @@ -10107,7 +10120,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "query", // "type": "string" // }, @@ -10517,7 +10530,8 @@ type ObjectsPatchCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10756,7 +10770,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -10839,12 +10853,14 @@ type ObjectsRewriteCall struct { // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - sourceBucket: Name of the bucket in which to find the source // object. // - sourceObject: Name of the source object. For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -11140,7 +11156,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11251,7 +11267,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11294,7 +11310,8 @@ type ObjectsSetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11431,7 +11448,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11475,7 +11492,8 @@ type ObjectsTestIamPermissionsCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11622,7 +11640,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11671,7 +11689,8 @@ type ObjectsUpdateCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11910,7 +11929,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go new file mode 100644 index 00000000..f27978e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package manual defines a resolver that can be used to manually send resolved +// addresses to the ClientConn. +package manual + +import ( + "sync" + + "google.golang.org/grpc/resolver" +) + +// NewBuilderWithScheme creates a new test resolver builder with the given scheme. +func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, + ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, + scheme: scheme, + } +} + +// Resolver is also a resolver builder. +// Its Build() function always returns itself. +type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. + ResolveNowCallback func(resolver.ResolveNowOptions) + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string + + // Fields actually belong to the resolver. + mu sync.Mutex // Guards access to CC. + CC resolver.ClientConn + bootstrapState *resolver.State +} + +// InitialState adds initial state to the resolver so that UpdateState doesn't +// need to be explicitly called after Dial. +func (r *Resolver) InitialState(s resolver.State) { + r.bootstrapState = &s +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.mu.Lock() + r.CC = cc + r.mu.Unlock() + r.BuildCallback(target, cc, opts) + if r.bootstrapState != nil { + r.UpdateState(*r.bootstrapState) + } + return r, nil +} + +// Scheme returns the test scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ResolveNowCallback(o) +} + +// Close is a noop for Resolver. +func (r *Resolver) Close() { + r.CloseCallback() +} + +// UpdateState calls CC.UpdateState. +func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() + r.CC.UpdateState(s) + r.mu.Unlock() +} + +// ReportError calls CC.ReportError.
+func (r *Resolver) ReportError(err error) { + r.mu.Lock() + r.CC.ReportError(err) + r.mu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d11d7276..f2f02207 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,18 @@ +# ariga.io/atlas v0.10.2-0.20230427182402-87a07dfb83bf +## explicit; go 1.19 +ariga.io/atlas/schemahcl +ariga.io/atlas/sql/internal/specutil +ariga.io/atlas/sql/internal/sqlx +ariga.io/atlas/sql/migrate +ariga.io/atlas/sql/mysql +ariga.io/atlas/sql/mysql/internal/mysqlversion +ariga.io/atlas/sql/postgres +ariga.io/atlas/sql/postgres/internal/postgresop +ariga.io/atlas/sql/schema +ariga.io/atlas/sql/sqlclient +ariga.io/atlas/sql/sqlite +ariga.io/atlas/sql/sqlspec +ariga.io/atlas/sql/sqltool # bitbucket.org/creachadair/stringset v0.0.11 ## explicit; go 1.18 bitbucket.org/creachadair/stringset @@ -26,12 +41,52 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/stubs +# entgo.io/ent v0.12.3 +## explicit; go 1.20 +entgo.io/ent +entgo.io/ent/dialect +entgo.io/ent/dialect/entsql +entgo.io/ent/dialect/sql +entgo.io/ent/dialect/sql/schema +entgo.io/ent/dialect/sql/sqlgraph +entgo.io/ent/dialect/sql/sqljson +entgo.io/ent/entql +entgo.io/ent/schema +entgo.io/ent/schema/edge +entgo.io/ent/schema/field +entgo.io/ent/schema/index +# github.com/AppsFlyer/go-sundheit v0.5.0 +## explicit; go 1.15 +github.com/AppsFlyer/go-sundheit +github.com/AppsFlyer/go-sundheit/checks +github.com/AppsFlyer/go-sundheit/http +# github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 +## explicit +github.com/Azure/go-ntlmssp +# github.com/Masterminds/goutils v1.1.1 +## explicit +github.com/Masterminds/goutils +# github.com/Masterminds/semver v1.5.0 +## explicit +github.com/Masterminds/semver +# github.com/Masterminds/semver/v3 v3.2.0 +## explicit; go 1.18 +github.com/Masterminds/semver/v3 +# github.com/Masterminds/sprig/v3 v3.2.3 +## explicit; go 1.13 +github.com/Masterminds/sprig/v3 +# github.com/agext/levenshtein v1.2.1 +## explicit +github.com/agext/levenshtein # github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df ## explicit; go 1.18 github.com/antlr/antlr4/runtime/Go/antlr/v4 # github.com/apache/thrift v0.16.0 ## explicit; go 1.16 github.com/apache/thrift/lib/go/thrift +# github.com/apparentlymart/go-textseg/v13 v13.0.0 +## explicit; go 1.16 +github.com/apparentlymart/go-textseg/v13/textseg # github.com/aws/aws-sdk-go v1.44.41 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws @@ -106,6 +161,9 @@ github.com/bazelbuild/bazel-watcher/internal/ibazel/profiler github.com/bazelbuild/bazel-watcher/internal/ibazel/workspace github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/analysis github.com/bazelbuild/bazel-watcher/third_party/bazel/master/src/main/protobuf/blaze_query +# github.com/beevik/etree v1.2.0 +## explicit; go 1.13 +github.com/beevik/etree # github.com/benbjohnson/clock v1.3.0 ## explicit; go 1.15 github.com/benbjohnson/clock @@ -142,21 +200,81 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/coreos/go-oidc/v3 v3.1.0 -## explicit; go 1.14 +# github.com/coreos/go-oidc/v3 v3.6.0 +## explicit; go 1.19 github.com/coreos/go-oidc/v3/oidc +# github.com/coreos/go-semver v0.3.1 +## explicit; go 1.8 +github.com/coreos/go-semver/semver +# github.com/coreos/go-systemd/v22 v22.3.2 +## explicit; go 1.12 
+github.com/coreos/go-systemd/v22/journal # github.com/cpuguy83/go-md2man/v2 v2.0.2 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/dexidp/dex v0.0.0-20230804184036-a9d1fd31c329 +## explicit; go 1.20 +github.com/dexidp/dex/cmd/dex +github.com/dexidp/dex/connector +github.com/dexidp/dex/connector/atlassiancrowd +github.com/dexidp/dex/connector/authproxy +github.com/dexidp/dex/connector/bitbucketcloud +github.com/dexidp/dex/connector/gitea +github.com/dexidp/dex/connector/github +github.com/dexidp/dex/connector/gitlab +github.com/dexidp/dex/connector/google +github.com/dexidp/dex/connector/keystone +github.com/dexidp/dex/connector/ldap +github.com/dexidp/dex/connector/linkedin +github.com/dexidp/dex/connector/microsoft +github.com/dexidp/dex/connector/mock +github.com/dexidp/dex/connector/oauth +github.com/dexidp/dex/connector/oidc +github.com/dexidp/dex/connector/openshift +github.com/dexidp/dex/connector/saml +github.com/dexidp/dex/pkg/groups +github.com/dexidp/dex/pkg/httpclient +github.com/dexidp/dex/pkg/log +github.com/dexidp/dex/server +github.com/dexidp/dex/server/internal +github.com/dexidp/dex/storage +github.com/dexidp/dex/storage/ent +github.com/dexidp/dex/storage/ent/client +github.com/dexidp/dex/storage/ent/db +github.com/dexidp/dex/storage/ent/db/authcode +github.com/dexidp/dex/storage/ent/db/authrequest +github.com/dexidp/dex/storage/ent/db/connector +github.com/dexidp/dex/storage/ent/db/devicerequest +github.com/dexidp/dex/storage/ent/db/devicetoken +github.com/dexidp/dex/storage/ent/db/keys +github.com/dexidp/dex/storage/ent/db/migrate +github.com/dexidp/dex/storage/ent/db/oauth2client +github.com/dexidp/dex/storage/ent/db/offlinesession +github.com/dexidp/dex/storage/ent/db/password +github.com/dexidp/dex/storage/ent/db/predicate +github.com/dexidp/dex/storage/ent/db/refreshtoken +github.com/dexidp/dex/storage/ent/schema +github.com/dexidp/dex/storage/etcd +github.com/dexidp/dex/storage/kubernetes +github.com/dexidp/dex/storage/kubernetes/k8sapi +github.com/dexidp/dex/storage/memory +github.com/dexidp/dex/storage/sql +github.com/dexidp/dex/web +# github.com/dexidp/dex/api/v2 v2.1.1-0.20230804184036-a9d1fd31c329 +## explicit; go 1.17 +github.com/dexidp/dex/api/v2 # github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 ## explicit github.com/dgryski/go-farm # github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a ## explicit github.com/facebookgo/clock +# github.com/felixge/httpsnoop v1.0.3 +## explicit; go 1.13 +github.com/felixge/httpsnoop # github.com/fsnotify/fsevents v0.1.1 ## explicit github.com/fsnotify/fsevents @@ -166,6 +284,20 @@ github.com/fsnotify/fsnotify # github.com/gertd/go-pluralize v0.2.1 ## explicit; go 1.17 github.com/gertd/go-pluralize +# github.com/ghodss/yaml v1.0.0 +## explicit +github.com/ghodss/yaml +# github.com/go-asn1-ber/asn1-ber v1.5.4 +## explicit; go 1.13 +github.com/go-asn1-ber/asn1-ber +# github.com/go-jose/go-jose/v3 v3.0.0 +## explicit; go 1.12 +github.com/go-jose/go-jose/v3 +github.com/go-jose/go-jose/v3/cipher +github.com/go-jose/go-jose/v3/json +# github.com/go-ldap/ldap/v3 v3.4.5 +## explicit; go 1.14 +github.com/go-ldap/ldap/v3 # github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr @@ -173,6 +305,12 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr +# github.com/go-openapi/inflect v0.19.0 +## explicit +github.com/go-openapi/inflect 
+# github.com/go-sql-driver/mysql v1.7.1 +## explicit; go 1.13 +github.com/go-sql-driver/mysql # github.com/gocql/gocql v1.2.0 ## explicit; go 1.13 github.com/gocql/gocql @@ -316,18 +454,25 @@ github.com/googleapis/api-linter/rules/aip0235 github.com/googleapis/api-linter/rules/aip4232 github.com/googleapis/api-linter/rules/internal/data github.com/googleapis/api-linter/rules/internal/utils -# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +# github.com/googleapis/enterprise-certificate-proxy v0.2.5 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.11.0 +# github.com/googleapis/gax-go/v2 v2.12.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal # github.com/gopherjs/gopherjs v1.17.2 ## explicit; go 1.17 +# github.com/gorilla/handlers v1.5.1 +## explicit; go 1.14 +github.com/gorilla/handlers +# github.com/gorilla/mux v1.8.0 +## explicit; go 1.12 +github.com/gorilla/mux # github.com/gorilla/securecookie v1.1.1 ## explicit github.com/gorilla/securecookie @@ -365,9 +510,28 @@ github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed ## explicit github.com/hailocab/go-hostpool +# github.com/hashicorp/hcl/v2 v2.13.0 +## explicit; go 1.18 +github.com/hashicorp/hcl/v2 +github.com/hashicorp/hcl/v2/ext/customdecode +github.com/hashicorp/hcl/v2/ext/tryfunc +github.com/hashicorp/hcl/v2/gohcl +github.com/hashicorp/hcl/v2/hclparse +github.com/hashicorp/hcl/v2/hclsyntax +github.com/hashicorp/hcl/v2/hclwrite +github.com/hashicorp/hcl/v2/json +# github.com/huandu/xstrings v1.3.3 +## explicit; go 1.12 +github.com/huandu/xstrings # github.com/iancoleman/strcase v0.3.0 ## explicit; go 1.16 github.com/iancoleman/strcase +# github.com/imdario/mergo v0.3.11 +## explicit; go 1.13 +github.com/imdario/mergo +# github.com/inconshreveable/mousetrap v1.1.0 +## explicit; go 1.18 +github.com/inconshreveable/mousetrap # github.com/jaschaephraim/lrserver v0.0.0-20171129202958-50d19f603f71 ## explicit github.com/jaschaephraim/lrserver @@ -428,6 +592,9 @@ github.com/mailru/easyjson github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattermost/xml-roundtrip-validator v0.1.0 +## explicit; go 1.14 +github.com/mattermost/xml-roundtrip-validator # github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 github.com/mattn/go-colorable @@ -440,15 +607,30 @@ github.com/mattn/go-runewidth # github.com/mattn/go-shellwords v1.0.12 ## explicit; go 1.13 github.com/mattn/go-shellwords +# github.com/mattn/go-sqlite3 v1.14.17 +## explicit; go 1.16 +github.com/mattn/go-sqlite3 # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/mitchellh/copystructure v1.0.0 +## explicit +github.com/mitchellh/copystructure +# github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 +## explicit +github.com/mitchellh/go-wordwrap +# github.com/mitchellh/reflectwalk v1.0.0 +## explicit +github.com/mitchellh/reflectwalk # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 
github.com/modern-go/reflect2 +# github.com/oklog/run v1.1.0 +## explicit; go 1.13 +github.com/oklog/run # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter @@ -475,6 +657,7 @@ github.com/pmezard/go-difflib/difflib # github.com/prometheus/client_golang v1.16.0 ## explicit; go 1.17 github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.3.0 @@ -505,9 +688,17 @@ github.com/robfig/cron # github.com/robfig/cron/v3 v3.0.1 ## explicit; go 1.12 github.com/robfig/cron/v3 +# github.com/russellhaering/goxmldsig v1.4.0 +## explicit; go 1.15 +github.com/russellhaering/goxmldsig +github.com/russellhaering/goxmldsig/etreeutils +github.com/russellhaering/goxmldsig/types # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/shopspring/decimal v1.2.0 +## explicit; go 1.13 +github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -515,6 +706,12 @@ github.com/sirupsen/logrus ## explicit; go 1.18 # github.com/smartystreets/assertions v1.15.1 => github.com/smarty/assertions v1.15.1 ## explicit; go 1.18 +# github.com/spf13/cast v1.4.1 +## explicit +github.com/spf13/cast +# github.com/spf13/cobra v1.7.0 +## explicit; go 1.15 +github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -614,10 +811,42 @@ github.com/xwb1989/sqlparser/dependency/bytes2 github.com/xwb1989/sqlparser/dependency/hack github.com/xwb1989/sqlparser/dependency/querypb github.com/xwb1989/sqlparser/dependency/sqltypes +# github.com/zclconf/go-cty v1.8.0 +## explicit; go 1.12 +github.com/zclconf/go-cty/cty +github.com/zclconf/go-cty/cty/convert +github.com/zclconf/go-cty/cty/function +github.com/zclconf/go-cty/cty/function/stdlib +github.com/zclconf/go-cty/cty/gocty +github.com/zclconf/go-cty/cty/json +github.com/zclconf/go-cty/cty/set # go.ciq.dev/pika v0.0.0-20230819201750-737c3e8f413d ## explicit; go 1.20 go.ciq.dev/pika go.ciq.dev/pika/parser +# go.etcd.io/etcd/api/v3 v3.5.9 +## explicit; go 1.19 +go.etcd.io/etcd/api/v3/authpb +go.etcd.io/etcd/api/v3/etcdserverpb +go.etcd.io/etcd/api/v3/membershippb +go.etcd.io/etcd/api/v3/mvccpb +go.etcd.io/etcd/api/v3/v3rpc/rpctypes +go.etcd.io/etcd/api/v3/version +# go.etcd.io/etcd/client/pkg/v3 v3.5.9 +## explicit; go 1.19 +go.etcd.io/etcd/client/pkg/v3/fileutil +go.etcd.io/etcd/client/pkg/v3/logutil +go.etcd.io/etcd/client/pkg/v3/systemd +go.etcd.io/etcd/client/pkg/v3/tlsutil +go.etcd.io/etcd/client/pkg/v3/transport +go.etcd.io/etcd/client/pkg/v3/types +# go.etcd.io/etcd/client/v3 v3.5.9 +## explicit; go 1.19 +go.etcd.io/etcd/client/v3 +go.etcd.io/etcd/client/v3/credentials +go.etcd.io/etcd/client/v3/internal/endpoint +go.etcd.io/etcd/client/v3/internal/resolver +go.etcd.io/etcd/client/v3/namespace # go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io @@ -952,10 +1181,13 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore +go.uber.org/zap/zapgrpc # golang.org/x/crypto v0.11.0 ## explicit; go 1.17 golang.org/x/crypto/acme golang.org/x/crypto/acme/autocert +golang.org/x/crypto/bcrypt +golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte @@ -964,7 +1196,9 
@@ golang.org/x/crypto/ed25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/md4 golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/scrypt # golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc ## explicit; go 1.20 golang.org/x/exp/constraints @@ -972,11 +1206,16 @@ golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/mod v0.10.0 ## explicit; go 1.17 +golang.org/x/mod/internal/lazyregexp +golang.org/x/mod/modfile +golang.org/x/mod/module golang.org/x/mod/semver # golang.org/x/net v0.13.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context +golang.org/x/net/html +golang.org/x/net/html/atom golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/h2c @@ -992,13 +1231,15 @@ golang.org/x/net/trace ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler +golang.org/x/oauth2/bitbucket +golang.org/x/oauth2/github golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.3.0 +## explicit; go 1.17 golang.org/x/sync/semaphore # golang.org/x/sys v0.11.0 ## explicit; go 1.17 @@ -1023,7 +1264,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.8.0 +# golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 ## explicit; go 1.18 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver @@ -1044,8 +1285,9 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.126.0 +# google.golang.org/api v0.134.0 ## explicit; go 1.19 +google.golang.org/api/admin/directory/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/iamcredentials/v1 @@ -1156,6 +1398,7 @@ google.golang.org/grpc/reflection google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status @@ -1337,4 +1580,5 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml +# github.com/coreos/bbolt v1.3.7 => go.etcd.io/bbolt v1.3.7 # github.com/smartystreets/assertions v1.15.1 => github.com/smarty/assertions v1.15.1
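Note for reviewers: the newly vendored google.golang.org/grpc/resolver/manual package is typically used to pin a ClientConn to a fixed, test-controlled address list. Below is a minimal sketch of that usage, not part of the patch itself: the "example" scheme and the 127.0.0.1 addresses are illustrative assumptions, and it relies on grpc.WithResolvers and the credentials/insecure package, both available in the grpc-go releases this module range covers.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	// Build a manual resolver; the scheme is an arbitrary, test-local choice.
	r := manual.NewBuilderWithScheme("example")

	// Seed the resolver so UpdateState does not have to be called
	// explicitly after Dial (this is what InitialState is for).
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
	})

	// WithResolvers registers the builder for this ClientConn only; the
	// endpoint portion of the target is ignored by the manual resolver.
	conn, err := grpc.Dial("example:///ignored",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// The address list can be swapped at runtime, e.g. to simulate a
	// backend moving between hosts.
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50052"}},
	})
}

Because Build stores the ClientConn under a mutex and InitialState pre-seeds the state, test code can drive address changes without racing Dial.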