Mirror of https://github.com/rocky-linux/peridot.git, synced 2024-12-18 08:58:30 +00:00

Merge pull request #28 from mstg/peridot-cli-lookaside
Commit ce6eb64ee2

3 .ijwb/.idea/codeStyles/Project.xml (generated)
@@ -109,6 +109,9 @@
<pair source="c" header="h" />
</extensions>
</Objective-C-extensions>
<ScalaCodeStyleSettings>
<option name="MULTILINE_STRING_CLOSING_QUOTES_ON_NEW_LINE" value="true" />
</ScalaCodeStyleSettings>
<TypeScriptCodeStyleSettings version="0">
<option name="FORCE_SEMICOLON_STYLE" value="true" />
<option name="SPACE_BEFORE_FUNCTION_LEFT_PARENTH" value="false" />

@@ -54,7 +54,7 @@ go_rules_dependencies()

go_register_toolchains(
nogo = "@peridot//:nogo",
version = "1.17.7",
version = "1.18.3",
)

go_repository(
@@ -164,10 +164,10 @@ go_repositories()
# --start protoc_gen_validate--
http_archive(
name = "com_envoyproxy_protoc_gen_validate",
sha256 = "51ba05210a1a2940530455e01c010daa26d504f4b14855a452716772ea39090c",
strip_prefix = "protoc-gen-validate-0.6.3",
sha256 = "4c692c62e16c168049bca2b2972b0a25222870cf53e61be30b50d761e58728bd",
strip_prefix = "protoc-gen-validate-0.6.7",
urls = [
"https://github.com/envoyproxy/protoc-gen-validate/archive/v0.6.3.tar.gz",
"https://github.com/envoyproxy/protoc-gen-validate/archive/v0.6.7.tar.gz",
],
)

13 go.mod
@@ -26,12 +26,14 @@ require (
github.com/gocolly/colly/v2 v2.1.0
github.com/gogo/status v1.1.0
github.com/google/uuid v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0
github.com/imdario/mergo v0.3.11 // indirect
github.com/jmoiron/sqlx v1.3.4
github.com/lib/pq v1.10.2
github.com/ory/hydra-client-go v1.10.6
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/prometheus/client_golang v1.13.0
github.com/rocky-linux/srpmproc v0.3.16
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
@@ -44,17 +46,17 @@ require (
go.temporal.io/api v1.6.1-0.20211110205628-60c98e9cbfe2
go.temporal.io/sdk v1.13.1
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247
google.golang.org/grpc v1.44.0
google.golang.org/protobuf v1.27.1
google.golang.org/protobuf v1.28.1
gopkg.in/ini.v1 v1.57.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
k8s.io/client-go v0.22.1
openapi.peridot.resf.org/peridotopenapi v0.0.0-00010101000000-000000000000
peridot.resf.org/common v0.0.0-00010101000000-000000000000
peridot.resf.org/obsidian/pb v0.0.0-00010101000000-000000000000
peridot.resf.org/peridot/keykeeper/pb v0.0.0-00010101000000-000000000000
@@ -65,7 +67,10 @@ require (
)

// Manual replace
replace github.com/dgrijalva/jwt-go v3.2.0+incompatible => github.com/golang-jwt/jwt/v4 v4.4.2
replace (
github.com/dgrijalva/jwt-go v3.2.0+incompatible => github.com/golang-jwt/jwt/v4 v4.4.2
openapi.peridot.resf.org/peridotopenapi => ./bazel-bin/peridot/proto/v1/client_go
)

// sync-replace-start
replace (

79 go.sum
@ -73,7 +73,10 @@ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4Rq
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/andybalholm/cascadia v1.2.0 h1:vuRCkM5Ozh/BfmsaTm26kbjm0mIOM3yS5Ek/F5h18aE=
@ -112,6 +115,8 @@ github.com/aws/aws-sdk-go v1.36.12 h1:YJpKFEMbqEoo+incs5qMe61n1JH3o4O1IMkMexLzJG
github.com/aws/aws-sdk-go v1.36.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bluekeyes/go-gitdiff v0.5.0 h1:AXrIoy/VEA9Baz2lhwMlpdzDJ/sKof6C9yTt1oqw4hQ=
@ -121,8 +126,11 @@ github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFf
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@ -197,8 +205,12 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@ -387,9 +399,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -426,6 +440,7 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
@ -482,15 +497,19 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70 h1:thTca5Eyouk5CEcJ75Cbw9CSAGE7TAc6rIi+WgHWpOE=
github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70/go.mod h1:hHYbgxJuNLRw91CmpuFsYEOyQqpDVFg8pvEh23vy4P0=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
@ -506,6 +525,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -540,6 +560,7 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@ -561,11 +582,13 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@ -604,13 +627,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -636,6 +679,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@ -838,10 +882,13 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211109214657-ef0fda0de508/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -849,8 +896,11 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -861,7 +911,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -891,6 +943,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -905,24 +958,32 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 h1:WecRHqgE09JBkh/584XIE6PMz5KKE/vER4izNUi30AQ=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1107,8 +1168,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1133,6 +1195,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -64,11 +64,16 @@ func init() {

pf := root.PersistentFlags()
pf.String("target.db", "", "target db to initialize")
pf.Bool("skip", false, "Whether to skip InitDB without removing it as an init container")

utils.AddFlags(pf, cnf)
}

func mn(_ *cobra.Command, _ []string) {
if viper.GetBool("skip") {
os.Exit(0)
}

ctx := context.TODO()

targetDB := viper.GetString("target.db")
@@ -61,6 +61,9 @@ func callbackForwarder(callbackURL string) string {
// this section contained a callback forwarder, but cannot be published
// todo(mustafa): evaluate other ways to make it easier for dev
if env == "dev" || env == "" {
if fwd := os.Getenv("OBSIDIAN_CALLBACK_FORWARDER"); fwd != "" {
return fmt.Sprintf("%s/%s", fwd, callbackURL)
}
return callbackURL
}
return callbackURL
@@ -80,9 +80,7 @@ func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.Una
func (s *Server) Run() {
res := utils.NewGRPCServer(
&utils.GRPCOptions{
ServerOptions: []grpc.ServerOption{
grpc.UnaryInterceptor(s.interceptor),
},
Interceptor: s.interceptor,
},
func(r *utils.Register) {
endpoints := []utils.GrpcEndpointRegister{
@@ -612,7 +612,7 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
return nil, err
}

subtask, err := c.db.GetTask(uploadSRPMResult.Subtask.ID.String(), project.ID.String())
subtask, err := c.db.GetTask(uploadSRPMResult.Subtask.ID.String(), utils.Pointer(project.ID.String()))
if err != nil {
setInternalError(errorDetails, err)
return nil, err
@@ -721,7 +721,7 @@ func (c *Controller) BuildWorkflow(ctx workflow.Context, req *peridotpb.SubmitBu
if result.Skip {
continue
}
subtask, err := c.db.GetTask(result.Subtask.ID.String(), project.ID.String())
subtask, err := c.db.GetTask(result.Subtask.ID.String(), utils.Pointer(project.ID.String()))
if err != nil {
ret.err = fmt.Errorf("failed to get task: %s", err)
return
@@ -54,6 +54,7 @@ import (
"peridot.resf.org/peridot/db/models"
peridotpb "peridot.resf.org/peridot/pb"
"peridot.resf.org/utils"
"runtime"
"strings"
"time"
)
@@ -78,6 +79,19 @@ func archToGoArch(arch string) string {
return arch
}

func goArchToArch(arch string) string {
switch arch {
case "arm64":
return "aarch64"
case "ppc64le":
return "ppc64le"
case "s390x":
return "s390x"
default:
return "x86_64"
}
}

func (c *Controller) genNameWorker(buildID, purpose string) string {
return strings.ReplaceAll(fmt.Sprintf("pb-%s-%s", buildID, purpose), "_", "-")
}
@@ -89,13 +103,20 @@ func (c *Controller) genNameWorker(buildID, purpose string) string {
func (c *Controller) provisionWorker(ctx workflow.Context, req *ProvisionWorkerRequest) (string, func(), error) {
queue := c.mainQueue

projects, err := c.db.ListProjects(&peridotpb.ProjectFilters{
Id: wrapperspb.String(req.ProjectId),
})
if err != nil {
return "", nil, fmt.Errorf("could not list projects: %v", err)
var project *models.Project
if req.ProjectId != "" {
projects, err := c.db.ListProjects(&peridotpb.ProjectFilters{
Id: wrapperspb.String(req.ProjectId),
})
if err != nil {
return "", nil, fmt.Errorf("could not list projects: %v", err)
}
project = &projects[0]
} else {
project = &models.Project{
Archs: []string{goArchToArch(runtime.GOARCH)},
}
}
project := projects[0]

// Normalize arch string
imageArch := req.Arch
@@ -149,7 +170,7 @@ func (c *Controller) provisionWorker(ctx workflow.Context, req *ProvisionWorkerR
ctx = workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
TaskQueue: queue,
})
err = workflow.ExecuteChildWorkflow(ctx, c.ProvisionWorkerWorkflow, req, queue, imageArch).Get(ctx, &podName)
err := workflow.ExecuteChildWorkflow(ctx, c.ProvisionWorkerWorkflow, req, queue, imageArch).Get(ctx, &podName)
if err != nil {
var applicationErr *temporal.ApplicationError
if errors.As(err, &applicationErr) {
@@ -181,7 +202,11 @@ func (c *Controller) provisionWorker(ctx workflow.Context, req *ProvisionWorkerR
func (c *Controller) ProvisionWorkerWorkflow(ctx workflow.Context, req *ProvisionWorkerRequest, queue string, imageArch string) (string, error) {
var task models.Task
taskSideEffect := workflow.SideEffect(ctx, func(ctx workflow.Context) interface{} {
task, err := c.db.CreateTask(nil, "noarch", peridotpb.TaskType_TASK_TYPE_WORKER_PROVISION, &req.ProjectId, &req.TaskId)
var projectId *string
if req.ProjectId != "" {
projectId = &req.ProjectId
}
task, err := c.db.CreateTask(nil, "noarch", peridotpb.TaskType_TASK_TYPE_WORKER_PROVISION, projectId, &req.TaskId)
if err != nil {
return nil
}
@@ -192,7 +217,7 @@ func (c *Controller) ProvisionWorkerWorkflow(ctx workflow.Context, req *Provisio
if err != nil {
return "", err
}
if !task.ProjectId.Valid {
if task.ID.String() == "" {
return "", fmt.Errorf("could not create task")
}

@@ -234,7 +259,11 @@ func (c *Controller) ProvisionWorkerWorkflow(ctx workflow.Context, req *Provisio
func (c *Controller) DestroyWorkerWorkflow(ctx workflow.Context, req *ProvisionWorkerRequest) error {
var task models.Task
taskSideEffect := workflow.SideEffect(ctx, func(ctx workflow.Context) interface{} {
task, err := c.db.CreateTask(nil, "noarch", peridotpb.TaskType_TASK_TYPE_WORKER_DESTROY, &req.ProjectId, &req.TaskId)
var projectId *string
if req.ProjectId != "" {
projectId = &req.ProjectId
}
task, err := c.db.CreateTask(nil, "noarch", peridotpb.TaskType_TASK_TYPE_WORKER_DESTROY, projectId, &req.TaskId)
if err != nil {
return nil
}
@@ -245,7 +274,7 @@ func (c *Controller) DestroyWorkerWorkflow(ctx workflow.Context, req *ProvisionW
if err != nil {
return err
}
if !task.ProjectId.Valid {
if task.ID.String() == "" {
return fmt.Errorf("could not create task")
}

@@ -570,6 +599,10 @@ func (c *Controller) CreateK8sPodActivity(ctx context.Context, req *ProvisionWor
Name: "BYC_NS",
Value: os.Getenv("BYC_NS"),
},
{
Name: "BYC_FORCE_NS",
Value: os.Getenv("BYC_FORCE_NS"),
},
{
Name: "LOCALSTACK_ENDPOINT",
Value: os.Getenv("LOCALSTACK_ENDPOINT"),
@@ -54,6 +54,7 @@ import (
yumrepofspb "peridot.resf.org/peridot/yumrepofs/pb"
"peridot.resf.org/secparse/rpmutils"
"peridot.resf.org/utils"
"strings"
"time"
)

@@ -61,6 +62,11 @@ type RpmImportActivityTaskStage1 struct {
Build *models.Build
}

type RpmImportUploadWrapper struct {
Upload *UploadActivityResult
TaskID string
}

func (c *Controller) RpmImportWorkflow(ctx workflow.Context, req *peridotpb.RpmImportRequest, task *models.Task) (*peridotpb.RpmImportTask, error) {
var ret peridotpb.RpmImportTask
deferTask, errorDetails, err := c.commonCreateTask(task, &ret)
@@ -93,7 +99,7 @@ func (c *Controller) RpmImportWorkflow(ctx workflow.Context, req *peridotpb.RpmI
MaximumAttempts: 1,
},
})
err = workflow.ExecuteActivity(importCtx, c.RpmImportActivity, req, task.ID.String()).Get(ctx, &importRes)
err = workflow.ExecuteActivity(importCtx, c.RpmImportActivity, req, task.ID.String(), false).Get(ctx, &importRes)
if err != nil {
setActivityError(errorDetails, err)
return nil, err
@@ -149,7 +155,139 @@ func (c *Controller) RpmImportWorkflow(ctx workflow.Context, req *peridotpb.RpmI
return &ret, nil
}

func (c *Controller) RpmImportActivity(ctx context.Context, req *peridotpb.RpmImportRequest, taskID string) (*RpmImportActivityTaskStage1, error) {
func (c *Controller) RpmLookasideBatchImportWorkflow(ctx workflow.Context, req *peridotpb.RpmLookasideBatchImportRequest, task *models.Task) (*peridotpb.RpmLookasideBatchImportTask, error) {
var ret peridotpb.RpmLookasideBatchImportTask
deferTask, errorDetails, err := c.commonCreateTask(task, &ret)
defer deferTask()
if err != nil {
return nil, err
}

task.Status = peridotpb.TaskStatus_TASK_STATUS_FAILED

importTaskQueue, cleanupWorker, err := c.provisionWorker(ctx, &ProvisionWorkerRequest{
TaskId: task.ID.String(),
ParentTaskId: task.ParentTaskId,
Purpose: "batchrpmimport",
Arch: "noarch",
ProjectId: req.ProjectId,
})
if err != nil {
setInternalError(errorDetails, err)
return nil, err
}
defer cleanupWorker()

taskID := task.ID.String()
var importResults []*RpmImportActivityTaskStage1
var taskIDs []string
taskIDBuildMap := map[string]*RpmImportActivityTaskStage1{}
for _, blob := range req.LookasideBlobs {
var archTask models.Task
archTaskEffect := workflow.SideEffect(ctx, func(ctx workflow.Context) interface{} {
newTask, err := c.db.CreateTask(nil, "noarch", peridotpb.TaskType_TASK_TYPE_RPM_IMPORT, &req.ProjectId, &taskID)
if err != nil {
return &models.Task{}
}

_ = c.db.SetTaskStatus(newTask.ID.String(), peridotpb.TaskStatus_TASK_STATUS_RUNNING)
return newTask
})
err := archTaskEffect.Get(&archTask)
if err != nil || !archTask.ProjectId.Valid {
return nil, fmt.Errorf("failed to create rpm task: %s", err)
}
taskIDs = append(taskIDs, archTask.ID.String())

var importRes RpmImportActivityTaskStage1
importCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
StartToCloseTimeout: time.Hour,
HeartbeatTimeout: 20 * time.Second,
TaskQueue: importTaskQueue,
RetryPolicy: &temporal.RetryPolicy{
MaximumAttempts: 1,
},
})
blobReq := &peridotpb.RpmImportRequest{
ProjectId: req.ProjectId,
Rpms: blob,
ForceOverride: req.ForceOverride,
}
err = workflow.ExecuteActivity(importCtx, c.RpmImportActivity, blobReq, archTask.ID.String(), true).Get(ctx, &importRes)
if err != nil {
setActivityError(errorDetails, err)
return nil, err
}
importResults = append(importResults, &importRes)
taskIDBuildMap[archTask.ID.String()] = &importRes
}

var res []*RpmImportUploadWrapper
for _, importTaskID := range taskIDs {
uploadArchCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{
ScheduleToStartTimeout: 12 * time.Hour,
StartToCloseTimeout: 24 * time.Hour,
HeartbeatTimeout: 2 * time.Minute,
TaskQueue: importTaskQueue,
})

var interimRes []*UploadActivityResult
err = workflow.ExecuteActivity(uploadArchCtx, c.UploadArchActivity, req.ProjectId, importTaskID).Get(ctx, &interimRes)
if err != nil {
setActivityError(errorDetails, err)
return nil, err
}
for _, ires := range interimRes {
res = append(res, &RpmImportUploadWrapper{
Upload: ires,
TaskID: importTaskID,
})
}
}

for _, result := range res {
stage1 := taskIDBuildMap[result.TaskID]
if stage1 == nil {
return nil, fmt.Errorf("failed to find task %s", result.TaskID)
}
err = c.db.AttachTaskToBuild(stage1.Build.ID.String(), result.Upload.Subtask.ID.String())
if err != nil {
err = status.Errorf(codes.Internal, "could not attach task to build: %v", err)
setInternalError(errorDetails, err)
return nil, err
}
if result.Upload.Skip {
continue
}
}

yumrepoCtx := workflow.WithChildOptions(ctx, workflow.ChildWorkflowOptions{
TaskQueue: "yumrepofs",
})
updateRepoRequest := &UpdateRepoRequest{
ProjectID: req.ProjectId,
BuildIDs: []string{},
Delete: false,
TaskID: &taskID,
NoDeletePrevious: true,
}
for _, importRes := range importResults {
updateRepoRequest.BuildIDs = append(updateRepoRequest.BuildIDs, importRes.Build.ID.String())
}
updateRepoTask := &yumrepofspb.UpdateRepoTask{}
err = workflow.ExecuteChildWorkflow(yumrepoCtx, c.RepoUpdaterWorkflow, updateRepoRequest).Get(yumrepoCtx, updateRepoTask)
if err != nil {
setActivityError(errorDetails, err)
return nil, err
}

task.Status = peridotpb.TaskStatus_TASK_STATUS_SUCCEEDED

ret.RepoChanges = updateRepoTask
return &ret, nil
}

func (c *Controller) RpmImportActivity(ctx context.Context, req *peridotpb.RpmImportRequest, taskID string, setTaskStatus bool) (*RpmImportActivityTaskStage1, error) {
go func() {
for {
activity.RecordHeartbeat(ctx)
@@ -164,48 +302,79 @@ func (c *Controller) RpmImportActivity(ctx context.Context, req *peridotpb.RpmIm
}
buf.Write(bts)

c.log.Infof("Reading tar: %s", req.Rpms)

rpmBufs := map[string][]byte{}
tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
var nBuf bytes.Buffer
if _, err := io.Copy(&nBuf, tr); err != nil {
return nil, err
}
c.log.Infof("Detected RPM: %s", hdr.Name)
rpmBufs[hdr.Name] = nBuf.Bytes()
}
var rpms []*rpm.Package
for _, b := range rpmBufs {
p, err := rpm.Read(bytes.NewBuffer(b))
rpmBufs := map[string][]byte{}

if strings.HasSuffix(req.Rpms, ".tar") {
c.log.Infof("Reading tar: %s", req.Rpms)

tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
var nBuf bytes.Buffer
if _, err := io.Copy(&nBuf, tr); err != nil {
return nil, err
}
c.log.Infof("Detected RPM: %s", hdr.Name)
rpmBufs[hdr.Name] = nBuf.Bytes()
}
for _, b := range rpmBufs {
p, err := rpm.Read(bytes.NewBuffer(b))
if err != nil {
return nil, err
}
rpms = append(rpms, p)
}
} else {
c.log.Infof("Reading RPM: %s", req.Rpms)
p, err := rpm.Read(&buf)
if err != nil {
return nil, err
}
rpms = append(rpms, p)

realName := p.String() + ".rpm"
if p.SourceRPM() == "" && p.Architecture() == "i686" {
realName = strings.ReplaceAll(realName, ".i686", ".src")
}
rpmBufs[realName] = bts
}

var nvr string
for _, rpmObj := range rpms {
realNvr := rpmObj.String()
if rpmObj.SourceRPM() == "" && rpmObj.Architecture() == "i686" {
realNvr = strings.ReplaceAll(realNvr, ".i686", ".src")
}
if nvr == "" {
nvr = rpmObj.SourceRPM()
}
if nvr != rpmObj.SourceRPM() {
return nil, fmt.Errorf("only include RPMs from one package")
if nvr == "" && rpmObj.Architecture() == "i686" {
nvr = realNvr
}

break
} else {
if nvr != rpmObj.SourceRPM() && nvr != fmt.Sprintf("%s.rpm", realNvr) {
return nil, fmt.Errorf("only include RPMs from one package")
}
}
}
if !rpmutils.NVR().MatchString(nvr) {
return nil, fmt.Errorf("invalid SNVR: %s", nvr)
}

nvrMatch := rpmutils.NVR().FindStringSubmatch(nvr)
var nvrMatch []string
if rpmutils.NVRUnusualRelease().MatchString(nvr) {
nvrMatch = rpmutils.NVRUnusualRelease().FindStringSubmatch(nvr)
} else {
nvrMatch = rpmutils.NVR().FindStringSubmatch(nvr)
}
srcNvra := fmt.Sprintf("%s-%s-%s.src", nvrMatch[1], nvrMatch[2], nvrMatch[3])

beginTx, err := c.db.Begin()
@@ -307,6 +476,14 @@ func (c *Controller) RpmImportActivity(ctx context.Context, req *peridotpb.RpmIm
}
}

if setTaskStatus {
err = tx.SetTaskStatus(taskID, peridotpb.TaskStatus_TASK_STATUS_SUCCEEDED)
if err != nil {
err = status.Errorf(codes.Internal, "could not set task status: %v", err)
return nil, err
}
}

err = beginTx.Commit()
if err != nil {
return nil, status.Errorf(codes.Internal, "could not commit transaction: %v", err)
@@ -46,6 +46,7 @@ import (
peridotpb "peridot.resf.org/peridot/pb"
"peridot.resf.org/peridot/plugin"
"peridot.resf.org/peridot/rpmbuild"
"peridot.resf.org/utils"
)

const (
@@ -199,7 +200,7 @@ func (c *Controller) commonCreateTask(task *models.Task, taskResponse proto.Mess
c.log.Errorf("could not set task status: %v", err)
}

taskDb, err := c.db.GetTask(task.ID.String(), task.ProjectId.String)
taskDb, err := c.db.GetTask(task.ID.String(), utils.NullStringToPointer(task.ProjectId))
if err != nil {
c.log.Errorf("could not get task: %v", err)
return
@@ -227,8 +227,13 @@ func GenerateArchMapForArtifacts(artifacts models.TaskArtifacts, project *models
for i, artifact := range artifacts {
var name string
var arch string
if rpmutils.NVR().MatchString(filepath.Base(artifact.Name)) {
nvr := rpmutils.NVR().FindStringSubmatch(filepath.Base(artifact.Name))
base := strings.TrimSuffix(filepath.Base(artifact.Name), ".rpm")
if rpmutils.NVRUnusualRelease().MatchString(base) {
nvr := rpmutils.NVRUnusualRelease().FindStringSubmatch(base)
name = nvr[1]
arch = nvr[4]
} else if rpmutils.NVR().MatchString(base) {
nvr := rpmutils.NVR().FindStringSubmatch(base)
name = nvr[1]
arch = nvr[4]
}
@@ -324,7 +329,6 @@ func GenerateArchMapForArtifacts(artifacts models.TaskArtifacts, project *models
artifactArchMap[arch] = append(artifactArchMap[arch], &newArtifact)
}
} else {
arch := artifact.Arch
if composetools.IsDebugPackage(name) {
arch = arch + "-debug"
}
34 peridot/cmd/v1/peridot/BUILD.bazel (new file)
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
name = "peridot_lib",
srcs = [
"build.go",
"build_rpm_import.go",
"lookaside.go",
"lookaside_upload.go",
"main.go",
"project.go",
"project_create_hashed_repos.go",
"utils.go",
],
data = [
"//peridot/proto/v1:client_go",
],
importpath = "peridot.resf.org/peridot/cmd/v1/peridot",
visibility = ["//visibility:private"],
deps = [
"//vendor/github.com/sirupsen/logrus",
"//vendor/github.com/spf13/cobra",
"//vendor/github.com/spf13/viper",
"//vendor/golang.org/x/oauth2",
"//vendor/golang.org/x/oauth2/clientcredentials",
"//vendor/openapi.peridot.resf.org/peridotopenapi",
],
)

go_binary(
name = "peridot",
embed = [":peridot_lib"],
visibility = ["//visibility:public"],
)
37 peridot/cmd/v1/peridot/build.go (new file)
@@ -0,0 +1,37 @@
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

package main

import "github.com/spf13/cobra"

var build = &cobra.Command{
Use: "build",
}
131
peridot/cmd/v1/peridot/build_rpm_import.go
Normal file
131
peridot/cmd/v1/peridot/build_rpm_import.go
Normal file
@ -0,0 +1,131 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main

import (
	"encoding/base64"
	"github.com/spf13/cobra"
	"io/ioutil"
	"log"
	"openapi.peridot.resf.org/peridotopenapi"
	"os"
	"time"
)

type LookasideUploadTask struct {
	Task struct {
		Subtasks []struct {
			Response struct {
				Digest string `json:"digest"`
			} `json:"response"`
		} `json:"subtasks"`
	} `json:"task"`
}

var buildRpmImport = &cobra.Command{
	Use:  "rpm-import [*.rpm]",
	Args: cobra.MinimumNArgs(1),
	Run:  buildRpmImportMn,
}

var buildRpmImportForceOverride bool

func init() {
	buildRpmImport.Flags().BoolVar(&buildRpmImportForceOverride, "force-override", true, "Force override even if version exists (default: true)")
}

func isFile(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}

	return true
}

func buildRpmImportMn(_ *cobra.Command, args []string) {
	// Ensure project id exists
	projectId := mustGetProjectID()
	_ = projectId

	// Ensure all args are valid files
	for _, arg := range args {
		if !isFile(arg) {
			log.Fatalf("%s is not a valid file", arg)
		}
	}

	// Upload blobs to lookaside and wait for operation to finish
	var blobs []string
	projectCl := getClient(serviceProject).(peridotopenapi.ProjectServiceApi)
	for _, arg := range args {
		bts, err := ioutil.ReadFile(arg)
		errFatal(err)
		base64EncodedBytes := base64.StdEncoding.EncodeToString(bts)

		res, _, err := projectCl.LookasideFileUpload(getContext()).Body(peridotopenapi.V1LookasideFileUploadRequest{
			File: &base64EncodedBytes,
		}).Execute()
		errFatal(err)
		log.Printf("Uploaded %s to lookaside", arg)
		blobs = append(blobs, res.GetDigest())
	}

	taskCl := getClient(serviceTask).(peridotopenapi.TaskServiceApi)

	log.Println("Triggering RPM batch import")

	cl := getClient(serviceBuild).(peridotopenapi.BuildServiceApi)
	importRes, _, err := cl.RpmLookasideBatchImport(getContext(), projectId).
		Body(peridotopenapi.InlineObject4{
			LookasideBlobs: &blobs,
			ForceOverride:  &buildRpmImportForceOverride,
		}).Execute()
	errFatal(err)

	// Wait for import to finish
	log.Printf("Waiting for import %s to finish\n", importRes.GetTaskId())
	for {
		res, _, err := taskCl.GetTask(getContext(), projectId, importRes.GetTaskId()).Execute()
		errFatal(err)
		task := res.GetTask()
		if task.GetDone() {
			if task.GetSubtasks()[0].GetStatus() == peridotopenapi.SUCCEEDED {
				log.Printf("Import %s finished successfully\n", importRes.GetTaskId())
				break
			} else {
				log.Printf("Import %s failed with status %s\n", importRes.GetTaskId(), task.GetSubtasks()[0].GetStatus())
				break
			}
		}

		time.Sleep(5 * time.Second)
	}
}
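A typical end-to-end invocation of the new subcommands, shown with a placeholder project id and file names (the real values depend on the deployment), would be:

peridot --project-id 11111111-1111-1111-1111-111111111111 build rpm-import ./foo-1.0-2.x86_64.rpm
peridot lookaside upload ./sources.tar.gz

Credentials come from --client-id/--client-secret (or the matching PERIDOT_* environment variables) registered on the root command in main.go below.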
|
37
peridot/cmd/v1/peridot/lookaside.go
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main

import "github.com/spf13/cobra"

var lookaside = &cobra.Command{
	Use: "lookaside",
}
|
68
peridot/cmd/v1/peridot/lookaside_upload.go
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main

import (
	"encoding/base64"
	"fmt"
	"github.com/spf13/cobra"
	"io/ioutil"
	"log"
	"openapi.peridot.resf.org/peridotopenapi"
	"os"
)

var lookasideUpload = &cobra.Command{
	Use:  "upload [file]",
	Args: cobra.ExactArgs(1),
	Run:  lookasideUploadMn,
}

func lookasideUploadMn(_ *cobra.Command, args []string) {
	filePath := args[0]
	stat, err := os.Stat(filePath)
	errFatal(err)
	if stat.IsDir() {
		errFatal(fmt.Errorf("%s is a directory", filePath))
	}

	bts, err := ioutil.ReadFile(filePath)
	errFatal(err)
	base64EncodedBytes := base64.StdEncoding.EncodeToString(bts)

	cl := getClient(serviceProject).(peridotopenapi.ProjectServiceApi)
	res, _, err := cl.LookasideFileUpload(getContext()).Body(peridotopenapi.V1LookasideFileUploadRequest{
		File: &base64EncodedBytes,
	}).Execute()
	errFatal(err)

	log.Printf("Digest: %s", res.GetDigest())
}
|
109
peridot/cmd/v1/peridot/main.go
Normal file
@ -0,0 +1,109 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"log"
	"strings"
)

var root = &cobra.Command{
	Use: "peridot",
}

func init() {
	root.PersistentFlags().String("endpoint", "peridot-api.build.resf.org", "Peridot API endpoint")
	root.PersistentFlags().String("hdr-endpoint", "hdr.build.resf.org", "RESF OIDC endpoint")
	root.PersistentFlags().Bool("skip-ca-verify", false, "Whether to accept self-signed certificates")
	root.PersistentFlags().String("client-id", "", "Client ID for authentication")
	root.PersistentFlags().String("client-secret", "", "Client secret for authentication")
	root.PersistentFlags().String("project-id", "", "Peridot project ID")
	root.PersistentFlags().Bool("debug", false, "Debug mode")

	root.AddCommand(lookaside)
	lookaside.AddCommand(lookasideUpload)

	root.AddCommand(build)
	build.AddCommand(buildRpmImport)

	root.AddCommand(project)
	project.AddCommand(projectCreateHashedRepos)

	viper.SetEnvPrefix("PERIDOT")
	viper.AutomaticEnv()
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))

	err := viper.BindPFlags(root.PersistentFlags())
	if err != nil {
		log.Fatalf("could not bind pflags to viper - %s", err)
	}
}

func main() {
	if err := root.Execute(); err != nil {
		logrus.Fatal(err)
	}
}

func endpoint() string {
	return viper.GetString("endpoint")
}

func hdrEndpoint() string {
	return viper.GetString("hdr-endpoint")
}

func skipCaVerify() bool {
	return viper.GetBool("skip-ca-verify")
}

func getClientId() string {
	return viper.GetString("client-id")
}

func getClientSecret() string {
	return viper.GetString("client-secret")
}

func mustGetProjectID() string {
	ret := viper.GetString("project-id")
	if ret == "" {
		logrus.Fatal("project-id is required")
	}
	return ret
}

func debug() bool {
	return viper.GetBool("debug")
}
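Because init() above sets an environment prefix, enables AutomaticEnv and installs a key replacer, every persistent flag can also be supplied through the environment. A minimal sketch of the resulting name mapping (this mirrors viper's behaviour rather than calling into it):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same replacer as in init(): "." and "-" become "_"; the key is upper-cased
	// and the PERIDOT prefix prepended, so --project-id maps to PERIDOT_PROJECT_ID.
	r := strings.NewReplacer(".", "_", "-", "_")
	fmt.Println("PERIDOT_" + strings.ToUpper(r.Replace("project-id")))
}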
|
37
peridot/cmd/v1/peridot/project.go
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main

import "github.com/spf13/cobra"

var project = &cobra.Command{
	Use: "project",
}
|
74
peridot/cmd/v1/peridot/project_create_hashed_repos.go
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"log"
|
||||
"openapi.peridot.resf.org/peridotopenapi"
|
||||
)
|
||||
|
||||
var projectCreateHashedRepos = &cobra.Command{
|
||||
Use: "create-hashed-repos [repositories]",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: projectCreateHashedReposMn,
|
||||
}
|
||||
|
||||
func projectCreateHashedReposMn(_ *cobra.Command, args []string) {
|
||||
projectID := mustGetProjectID()
|
||||
|
||||
taskCl := getClient(serviceTask).(peridotopenapi.TaskServiceApi)
|
||||
cl := getClient(serviceProject).(peridotopenapi.ProjectServiceApi)
|
||||
|
||||
hashedRes, _, err := cl.CreateHashedRepositories(getContext(), projectID).
|
||||
Body(peridotopenapi.InlineObject8{
|
||||
Repositories: &args,
|
||||
}).
|
||||
Execute()
|
||||
errFatal(err)
|
||||
|
||||
// Wait for task to complete
|
||||
log.Printf("Waiting for hashed operation %s to finish\n", hashedRes.GetTaskId())
|
||||
for {
|
||||
res, _, err := taskCl.GetTask(getContext(), projectID, hashedRes.GetTaskId()).Execute()
|
||||
errFatal(err)
|
||||
task := res.GetTask()
|
||||
if task.GetDone() {
|
||||
if task.GetSubtasks()[0].GetStatus() == peridotopenapi.SUCCEEDED {
|
||||
log.Printf("Hashed operation %s finished successfully\n", hashedRes.GetTaskId())
|
||||
break
|
||||
} else {
|
||||
log.Printf("Hashed operation %s failed with status %s\n", hashedRes.GetTaskId(), task.GetSubtasks()[0].GetStatus())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
126
peridot/cmd/v1/peridot/utils.go
Normal file
@ -0,0 +1,126 @@
|
||||
// Copyright (c) All respective contributors to the Peridot Project. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Rocky Enterprise Software Foundation, Inc. All rights reserved.
|
||||
// Copyright (c) 2021-2022 Ctrl IQ, Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
// may be used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"openapi.peridot.resf.org/peridotopenapi"
|
||||
)
|
||||
|
||||
type service string
|
||||
|
||||
const (
|
||||
serviceProject = service("project")
|
||||
serviceImport = service("import")
|
||||
servicePackage = service("package")
|
||||
serviceBuild = service("build")
|
||||
serviceTask = service("task")
|
||||
)
|
||||
|
||||
var (
|
||||
doNotUseDirectlyClient map[service]interface{}
|
||||
doNotUseDirectlyCtx context.Context
|
||||
)
|
||||
|
||||
func getClient(svc service) interface{} {
|
||||
if doNotUseDirectlyClient != nil {
|
||||
return doNotUseDirectlyClient[svc]
|
||||
}
|
||||
doNotUseDirectlyClient = make(map[service]interface{})
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
// We should allow users to configure this.
|
||||
InsecureSkipVerify: skipCaVerify(), //nolint:gosec
|
||||
}
|
||||
|
||||
apiCfg := &peridotopenapi.Configuration{
|
||||
Debug: debug(),
|
||||
Host: endpoint(),
|
||||
Scheme: "https",
|
||||
UserAgent: "peridot/0.1",
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
},
|
||||
DefaultHeader: map[string]string{},
|
||||
Servers: peridotopenapi.ServerConfigurations{
|
||||
{
|
||||
URL: "https://" + endpoint(),
|
||||
},
|
||||
},
|
||||
OperationServers: map[string]peridotopenapi.ServerConfigurations{},
|
||||
}
|
||||
|
||||
apiClient := peridotopenapi.NewAPIClient(apiCfg)
|
||||
|
||||
doNotUseDirectlyClient[serviceProject] = apiClient.ProjectServiceApi
|
||||
doNotUseDirectlyClient[serviceImport] = apiClient.ImportServiceApi
|
||||
doNotUseDirectlyClient[servicePackage] = apiClient.PackageServiceApi
|
||||
doNotUseDirectlyClient[serviceBuild] = apiClient.BuildServiceApi
|
||||
doNotUseDirectlyClient[serviceTask] = apiClient.TaskServiceApi
|
||||
|
||||
return doNotUseDirectlyClient[svc]
|
||||
}
|
||||
|
||||
func getContext() context.Context {
|
||||
if doNotUseDirectlyCtx == nil {
|
||||
doNotUseDirectlyCtx = context.TODO()
|
||||
|
||||
oauth2Config := &clientcredentials.Config{
|
||||
ClientID: getClientId(),
|
||||
ClientSecret: getClientSecret(),
|
||||
// We don't currently support scopes, but authorize based on SpiceDB.
// Lack of scopes does not indicate that the client has full access;
// access is managed server side, so scopes don't affect it.
|
||||
Scopes: []string{},
|
||||
TokenURL: fmt.Sprintf("https://%s/oauth2/token", hdrEndpoint()),
|
||||
AuthStyle: oauth2.AuthStyleInHeader,
|
||||
}
|
||||
|
||||
tokenSource := oauth2Config.TokenSource(doNotUseDirectlyCtx)
|
||||
doNotUseDirectlyCtx = context.WithValue(doNotUseDirectlyCtx, peridotopenapi.ContextOAuth2, tokenSource)
|
||||
}
|
||||
return doNotUseDirectlyCtx
|
||||
}
|
||||
|
||||
func errFatal(err error) {
|
||||
if err != nil {
|
||||
log.Fatalf("an error occurred: %s", err.Error())
|
||||
}
|
||||
}
|
@ -107,14 +107,16 @@ func mn(_ *cobra.Command, _ []string) {
|
||||
db := serverconnector.MustAuto()
|
||||
|
||||
var initiatedPlugins []plugin.Plugin
|
||||
plugins, err := db.GetPluginsForProject(projectId)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not get plugins: %v", err)
|
||||
}
|
||||
if plugins != nil {
|
||||
initiatedPlugins, err = initiatePlugins(plugins)
|
||||
if projectId != "" {
|
||||
plugins, err := db.GetPluginsForProject(projectId)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not initiate plugins: %v", err)
|
||||
logrus.Fatalf("could not get plugins: %v", err)
|
||||
}
|
||||
if plugins != nil {
|
||||
initiatedPlugins, err = initiatePlugins(plugins)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not initiate plugins: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -104,6 +104,7 @@ func mn(_ *cobra.Command, _ []string) {
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.TriggerImportFromBatchWorkflow)
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.SyncCatalogWorkflow)
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.RpmImportWorkflow)
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.RpmLookasideBatchImportWorkflow)
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.CreateHashedRepositoriesWorkflow)
|
||||
}
|
||||
w.Worker.RegisterWorkflow(w.WorkflowController.ProvisionWorkerWorkflow)
|
||||
|
@ -6,10 +6,13 @@ go_library(
|
||||
importpath = "peridot.resf.org/peridot/cmd/v1/peridotserver",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//peridot/common",
|
||||
"//peridot/db/connector",
|
||||
"//peridot/impl/v1:impl",
|
||||
"//peridot/lookaside/s3",
|
||||
"//temporalutils",
|
||||
"//utils",
|
||||
"//vendor/github.com/go-git/go-billy/v5/osfs",
|
||||
"//vendor/github.com/sirupsen/logrus",
|
||||
"//vendor/github.com/spf13/cobra",
|
||||
"//vendor/go.temporal.io/sdk/client",
|
||||
|
@ -29,6 +29,11 @@ bycdeploy.new({
|
||||
cpu: '2',
|
||||
memory: '10G',
|
||||
},
|
||||
service_account_options: {
|
||||
annotations: {
|
||||
'eks.amazonaws.com/role-arn': 'arn:aws:iam::893168113496:role/peridot_k8s_role',
|
||||
}
|
||||
},
|
||||
ports: [
|
||||
{
|
||||
name: 'http',
|
||||
@ -50,6 +55,18 @@ bycdeploy.new({
|
||||
name: 'PERIDOT_PRODUCTION',
|
||||
value: if kubernetes.dev() then 'false' else 'true',
|
||||
},
|
||||
if utils.local_image then {
|
||||
name: 'PERIDOT_S3_ENDPOINT',
|
||||
value: 'minio.default.svc.cluster.local:9000'
|
||||
},
|
||||
if utils.local_image then {
|
||||
name: 'PERIDOT_S3_DISABLE_SSL',
|
||||
value: 'true'
|
||||
},
|
||||
if utils.local_image then {
|
||||
name: 'PERIDOT_S3_FORCE_PATH_STYLE',
|
||||
value: 'true'
|
||||
},
|
||||
$.dsn,
|
||||
] + temporal.kube_env('PERIDOT'),
|
||||
})
|
||||
|
@ -31,11 +31,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"go.temporal.io/sdk/client"
|
||||
peridotcommon "peridot.resf.org/peridot/common"
|
||||
serverconnector "peridot.resf.org/peridot/db/connector"
|
||||
peridotimplv1 "peridot.resf.org/peridot/impl/v1"
|
||||
"peridot.resf.org/peridot/lookaside/s3"
|
||||
"peridot.resf.org/temporalutils"
|
||||
"peridot.resf.org/utils"
|
||||
)
|
||||
@ -54,6 +57,7 @@ func init() {
|
||||
cnf.DatabaseName = &dname
|
||||
cnf.Name = "peridot"
|
||||
|
||||
peridotcommon.AddFlags(root.PersistentFlags())
|
||||
utils.AddFlags(root.PersistentFlags(), cnf)
|
||||
}
|
||||
|
||||
@ -64,7 +68,12 @@ func mn(_ *cobra.Command, _ []string) {
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
s, err := peridotimplv1.NewServer(serverconnector.MustAuto(), c)
|
||||
storage, err := s3.New(osfs.New("/"))
|
||||
if err != nil {
|
||||
logrus.Fatalln("unable to create S3 storage", err)
|
||||
}
|
||||
|
||||
s, err := peridotimplv1.NewServer(serverconnector.MustAuto(), c, storage)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not init server: %v", err)
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ type Access interface {
|
||||
// ListTasks returns only parent tasks
|
||||
ListTasks(projectId *string, page int32, limit int32) (models.Tasks, error)
|
||||
// GetTask returns a parent task as well as all its child tasks
|
||||
GetTask(id string, projectId string) (models.Tasks, error)
|
||||
GetTask(id string, projectId *string) (models.Tasks, error)
|
||||
// GetTaskByBuildId returns the task of a build (only parent task)
|
||||
GetTaskByBuildId(buildId string) (*models.Task, error)
|
||||
AttachTaskToBuild(buildId string, taskId string) error
|
||||
|
@ -122,7 +122,7 @@ func (a *Access) SetTaskMetadata(id string, metadata *anypb.Any) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *Access) GetTask(id string, projectId string) (ret models.Tasks, err error) {
|
||||
func (a *Access) GetTask(id string, projectId *string) (ret models.Tasks, err error) {
|
||||
err = a.query.Select(
|
||||
&ret,
|
||||
`
|
||||
@ -130,7 +130,7 @@ func (a *Access) GetTask(id string, projectId string) (ret models.Tasks, err err
|
||||
select * from tasks
|
||||
where
|
||||
id = $1
|
||||
and project_id = $2
|
||||
and ($2 :: uuid is null or project_id = $2 :: uuid)
|
||||
union all
|
||||
select t.* from tasks t
|
||||
join task_query tq on tq.id = t.parent_task_id
|
||||
|
@ -18,6 +18,7 @@ go_library(
|
||||
"//peridot/builder/v1/workflow",
|
||||
"//peridot/db",
|
||||
"//peridot/db/models",
|
||||
"//peridot/lookaside",
|
||||
"//peridot/proto/v1:pb",
|
||||
"//proto:common",
|
||||
"//servicecatalog",
|
||||
@ -30,6 +31,7 @@ go_library(
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//credentials/insecure",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/anypb:go_default_library",
|
||||
|
@ -567,3 +567,77 @@ func (s *Server) RpmImport(ctx context.Context, req *peridotpb.RpmImportRequest)
|
||||
Done: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) RpmLookasideBatchImport(ctx context.Context, req *peridotpb.RpmLookasideBatchImportRequest) (*peridotpb.AsyncTask, error) {
|
||||
if err := req.ValidateAll(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.checkPermission(ctx, ObjectProject, req.ProjectId, PermissionBuild); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user, err := utils.UserFromContext(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
projects, err := s.db.ListProjects(&peridotpb.ProjectFilters{
|
||||
Id: wrapperspb.String(req.ProjectId),
|
||||
})
|
||||
if err != nil {
|
||||
s.log.Errorf("could not list projects in RpmLookasideBatchImport: %v", err)
|
||||
return nil, utils.InternalError
|
||||
}
|
||||
if len(projects) != 1 {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "project %s does not exist", req.ProjectId)
|
||||
}
|
||||
|
||||
rollback := true
|
||||
beginTx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
s.log.Error(err)
|
||||
return nil, utils.InternalError
|
||||
}
|
||||
defer func() {
|
||||
if rollback {
|
||||
_ = beginTx.Rollback()
|
||||
}
|
||||
}()
|
||||
tx := s.db.UseTransaction(beginTx)
|
||||
|
||||
task, err := tx.CreateTask(user, "noarch", peridotpb.TaskType_TASK_TYPE_RPM_LOOKASIDE_BATCH_IMPORT, &req.ProjectId, nil)
|
||||
if err != nil {
|
||||
s.log.Errorf("could not create build task in RpmLookasideBatchImport: %v", err)
|
||||
return nil, status.Error(codes.InvalidArgument, "could not create rpm import task")
|
||||
}
|
||||
|
||||
taskProto, err := task.ToProto(true)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "could not marshal task: %v", err)
|
||||
}
|
||||
|
||||
rollback = false
|
||||
err = beginTx.Commit()
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "could not save, try again")
|
||||
}
|
||||
|
||||
_, err = s.temporal.ExecuteWorkflow(
|
||||
context.Background(),
|
||||
client.StartWorkflowOptions{
|
||||
TaskQueue: MainTaskQueue,
|
||||
},
|
||||
s.temporalWorker.WorkflowController.RpmLookasideBatchImportWorkflow,
|
||||
req,
|
||||
task,
|
||||
)
|
||||
if err != nil {
|
||||
s.log.Errorf("could not start rpm lookaside batch import workflow in RpmLookasideBatchImport: %v", err)
|
||||
return nil, status.Error(codes.Internal, "could not start rpm lookaside batch import workflow")
|
||||
}
|
||||
|
||||
return &peridotpb.AsyncTask{
|
||||
TaskId: task.ID.String(),
|
||||
Subtasks: []*peridotpb.Subtask{taskProto},
|
||||
Done: false,
|
||||
}, nil
|
||||
}
|
||||
|
@ -32,7 +32,10 @@ package peridotimplv1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
v1 "github.com/authzed/authzed-go/proto/authzed/api/v1"
|
||||
"go.temporal.io/sdk/client"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -154,7 +157,7 @@ func (s *Server) ListProjects(ctx context.Context, req *peridotpb.ListProjectsRe
|
||||
}
|
||||
|
||||
projects, err := s.db.ListProjects(&peridotpb.ProjectFilters{
|
||||
Ids: resources,
|
||||
Ids: utils.Take[string](resources, "global"),
|
||||
})
|
||||
if err != nil {
|
||||
s.log.Errorf("could not list projects: %v", err)
|
||||
@ -403,3 +406,45 @@ func (s *Server) CreateHashedRepositories(ctx context.Context, req *peridotpb.Cr
|
||||
Done: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) LookasideFileUpload(ctx context.Context, req *peridotpb.LookasideFileUploadRequest) (*peridotpb.LookasideFileUploadResponse, error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.checkPermission(ctx, ObjectGlobal, ObjectIdPeridot, PermissionManage); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base64DecodedFile, err := base64.StdEncoding.DecodeString(req.File)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hasher := sha256.New()
|
||||
_, err = hasher.Write(base64DecodedFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sha256Sum := hex.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
exists, err := s.storage.Exists(sha256Sum)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "NotFound") {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if exists {
|
||||
return &peridotpb.LookasideFileUploadResponse{
|
||||
Digest: sha256Sum,
|
||||
}, nil
|
||||
}
|
||||
|
||||
_, err = s.storage.PutObjectBytes(sha256Sum, base64DecodedFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &peridotpb.LookasideFileUploadResponse{
|
||||
Digest: sha256Sum,
|
||||
}, nil
|
||||
}
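The digest returned above is simply the hex-encoded SHA-256 of the decoded file contents, so the lookaside is content addressed and re-uploading an existing blob short-circuits. A client can double-check the response against a locally computed digest; a minimal sketch (assumes crypto/sha256, encoding/hex and fmt are imported, and fileBytes holds the raw bytes that were base64-encoded for upload):

// verifyLookasideDigest mirrors the server-side hashing above: the expected
// digest is the hex-encoded SHA-256 of the raw (un-encoded) file bytes.
func verifyLookasideDigest(fileBytes []byte, digest string) error {
	sum := sha256.Sum256(fileBytes)
	if expected := hex.EncodeToString(sum[:]); expected != digest {
		return fmt.Errorf("lookaside digest mismatch: got %s, want %s", digest, expected)
	}
	return nil
}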
|
||||
|
@ -40,12 +40,14 @@ import (
|
||||
"go.temporal.io/sdk/client"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/status"
|
||||
"io"
|
||||
"net/url"
|
||||
commonpb "peridot.resf.org/common"
|
||||
builderv1 "peridot.resf.org/peridot/builder/v1"
|
||||
peridotdb "peridot.resf.org/peridot/db"
|
||||
"peridot.resf.org/peridot/lookaside"
|
||||
peridotpb "peridot.resf.org/peridot/pb"
|
||||
"peridot.resf.org/servicecatalog"
|
||||
"peridot.resf.org/utils"
|
||||
@ -87,9 +89,11 @@ type Server struct {
|
||||
temporalWorker *builderv1.Worker
|
||||
authz *authzed.Client
|
||||
hydra *hydraclient.OryHydra
|
||||
hydraAdmin *hydraclient.OryHydra
|
||||
storage lookaside.Storage
|
||||
}
|
||||
|
||||
func NewServer(db peridotdb.Access, c client.Client) (*Server, error) {
|
||||
func NewServer(db peridotdb.Access, c client.Client, storage lookaside.Storage) (*Server, error) {
|
||||
temporalWorker, err := builderv1.NewWorker(db, c, MainTaskQueue, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -104,13 +108,22 @@ func NewServer(db peridotdb.Access, c client.Client) (*Server, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse hydra public url, error: %s", err)
|
||||
}
|
||||
|
||||
hydraSDK := hydraclient.NewHTTPClientWithConfig(nil, &hydraclient.TransportConfig{
|
||||
Schemes: []string{publicURL.Scheme},
|
||||
Host: publicURL.Host,
|
||||
BasePath: publicURL.Path,
|
||||
})
|
||||
|
||||
adminURL, err := url.Parse(servicecatalog.HydraAdmin())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse hydra admin url, error: %s", err)
|
||||
}
|
||||
hydraAdminSDK := hydraclient.NewHTTPClientWithConfig(nil, &hydraclient.TransportConfig{
|
||||
Schemes: []string{adminURL.Scheme},
|
||||
Host: adminURL.Host,
|
||||
BasePath: adminURL.Path,
|
||||
})
|
||||
|
||||
return &Server{
|
||||
log: logrus.New(),
|
||||
db: db,
|
||||
@ -118,19 +131,21 @@ func NewServer(db peridotdb.Access, c client.Client) (*Server, error) {
|
||||
temporalWorker: temporalWorker,
|
||||
authz: authz,
|
||||
hydra: hydraSDK,
|
||||
hydraAdmin: hydraAdminSDK,
|
||||
storage: storage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
n := utils.EndInterceptor
|
||||
n = utils.AuthInterceptor(s.hydra, []string{}, false, n)
|
||||
n = utils.AuthInterceptor(s.hydra, s.hydraAdmin, []string{}, false, n)
|
||||
|
||||
return n(ctx, req, usi, handler)
|
||||
}
|
||||
|
||||
func (s *Server) serverInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
n := utils.ServerEndInterceptor
|
||||
n = utils.ServerAuthInterceptor(s.hydra, []string{}, false, n)
|
||||
n = utils.ServerAuthInterceptor(s.hydra, s.hydraAdmin, []string{}, false, n)
|
||||
|
||||
return n(srv, ss, info, handler)
|
||||
}
|
||||
@ -138,9 +153,13 @@ func (s *Server) serverInterceptor(srv interface{}, ss grpc.ServerStream, info *
|
||||
func (s *Server) Run() {
|
||||
res := utils.NewGRPCServer(
|
||||
&utils.GRPCOptions{
|
||||
DialOptions: []grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
},
|
||||
Interceptor: s.interceptor,
|
||||
ServerInterceptor: s.serverInterceptor,
|
||||
ServerOptions: []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(s.interceptor),
|
||||
grpc.StreamInterceptor(s.serverInterceptor),
|
||||
grpc.MaxRecvMsgSize(1024 * 1024 * 1024),
|
||||
},
|
||||
},
|
||||
func(r *utils.Register) {
|
||||
|
@ -51,9 +51,14 @@ func (s *Server) ListTasks(ctx context.Context, req *peridotpb.ListTasksRequest)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var projectId *string
|
||||
if req.ProjectId.Value != "global" {
|
||||
projectId = &req.ProjectId.Value
|
||||
}
|
||||
|
||||
page := utils.MinPage(req.Page)
|
||||
limit := utils.MinLimit(req.Limit)
|
||||
tasks, err := s.db.ListTasks(&req.ProjectId.Value, page, limit)
|
||||
tasks, err := s.db.ListTasks(projectId, page, limit)
|
||||
if err != nil {
|
||||
s.log.Error(err)
|
||||
return nil, utils.InternalError
|
||||
@ -61,12 +66,6 @@ func (s *Server) ListTasks(ctx context.Context, req *peridotpb.ListTasksRequest)
|
||||
var total int64
|
||||
if len(tasks) > 0 {
|
||||
total = tasks[0].Total
|
||||
} else {
|
||||
total, err = s.db.ImportCountInProject(req.ProjectId.Value)
|
||||
if err != nil {
|
||||
s.log.Errorf("could not count imports: %v", err)
|
||||
return nil, utils.CouldNotRetrieveObjects
|
||||
}
|
||||
}
|
||||
|
||||
var asyncTasks []*peridotpb.AsyncTask
|
||||
@ -98,7 +97,12 @@ func (s *Server) GetTask(ctx context.Context, req *peridotpb.GetTaskRequest) (*p
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tasks, err := s.db.GetTask(req.Id, req.ProjectId.Value)
|
||||
var projectId *string
|
||||
if req.ProjectId.Value != "global" {
|
||||
projectId = &req.ProjectId.Value
|
||||
}
|
||||
|
||||
tasks, err := s.db.GetTask(req.Id, projectId)
|
||||
if err != nil {
|
||||
s.log.Error(err)
|
||||
return nil, utils.InternalError
|
||||
@ -135,12 +139,16 @@ func (s *Server) StreamTaskLogs(req *peridotpb.StreamTaskLogsRequest, stream per
|
||||
|
||||
var taskId *string = nil
|
||||
var parentTaskId *string = nil
|
||||
var projectId *string
|
||||
if req.ProjectId != "global" {
|
||||
projectId = &req.ProjectId
|
||||
}
|
||||
if req.Parent {
|
||||
parentTaskId = &req.Id
|
||||
} else {
|
||||
taskId = &req.Id
|
||||
}
|
||||
_, err := s.db.GetTask(req.Id, req.ProjectId)
|
||||
_, err := s.db.GetTask(req.Id, projectId)
|
||||
if err != nil {
|
||||
s.log.Errorf("error getting task: %s", err)
|
||||
return utils.InternalError
|
||||
@ -173,7 +181,7 @@ func (s *Server) StreamTaskLogs(req *peridotpb.StreamTaskLogsRequest, stream per
|
||||
}
|
||||
}
|
||||
|
||||
task, err := s.db.GetTask(req.Id, req.ProjectId)
|
||||
task, err := s.db.GetTask(req.Id, projectId)
|
||||
if err != nil {
|
||||
s.log.Errorf("error getting task: %s", err)
|
||||
return utils.InternalError
|
||||
@ -189,11 +197,23 @@ func (s *Server) CancelTask(ctx context.Context, req *peridotpb.CancelTaskReques
|
||||
if err := req.ValidateAll(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.checkPermission(ctx, ObjectProject, req.ProjectId, PermissionBuild); err != nil {
|
||||
return nil, err
|
||||
|
||||
if req.ProjectId == "global" {
|
||||
if err := s.checkPermission(ctx, ObjectGlobal, ObjectIdPeridot, PermissionManage); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if err := s.checkPermission(ctx, ObjectProject, req.ProjectId, PermissionBuild); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tasks, err := s.db.GetTask(req.Id, req.ProjectId)
|
||||
var projectId *string
|
||||
if req.ProjectId != "global" {
|
||||
projectId = &req.ProjectId
|
||||
}
|
||||
|
||||
tasks, err := s.db.GetTask(req.Id, projectId)
|
||||
if err != nil {
|
||||
s.log.Error(err)
|
||||
return nil, utils.InternalError
|
||||
|
@ -174,10 +174,8 @@ func (s *Server) Run() {
|
||||
defer wg.Done()
|
||||
res := utils.NewGRPCServer(
|
||||
&utils.GRPCOptions{
|
||||
Timeout: &timeout,
|
||||
ServerOptions: []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(s.interceptor),
|
||||
},
|
||||
Timeout: &timeout,
|
||||
Interceptor: s.interceptor,
|
||||
},
|
||||
func(r *utils.Register) {
|
||||
endpoints := []utils.GrpcEndpointRegister{
|
||||
|
@ -0,0 +1 @@
|
||||
alter table tasks alter column project_id set not null;
|
@ -0,0 +1 @@
|
||||
alter table tasks alter column project_id drop not null;
|
@ -70,6 +70,18 @@ openapi_generator(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
openapi_generator(
|
||||
name = "client_go",
|
||||
additional_properties = {
|
||||
"packageName": "peridotopenapi",
|
||||
"generateInterfaces": "true",
|
||||
"hideGenerationTimestamp": "true",
|
||||
},
|
||||
generator = "go",
|
||||
spec = ":openapi",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "pb",
|
||||
embed = [":peridotpb_go_proto"],
|
||||
|
@ -65,7 +65,7 @@ service BuildService {
|
||||
};
|
||||
}
|
||||
|
||||
// RpmImport imports rpm files into a project
|
||||
// RpmImport imports rpm files into a project (packaged into tar format)
|
||||
rpc RpmImport(RpmImportRequest) returns (AsyncTask) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/projects/{project_id=*}/builds/rpm-import"
|
||||
@ -76,6 +76,18 @@ service BuildService {
|
||||
metadata_type: "RpmImportOperationMetadata"
|
||||
};
|
||||
}
|
||||
|
||||
// RpmLookasideBatchImport imports rpm files into a project (stored in Lookaside)
|
||||
rpc RpmLookasideBatchImport(RpmLookasideBatchImportRequest) returns (AsyncTask) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/projects/{project_id=*}/builds/rpm-lookaside-batch-import"
|
||||
body: "*"
|
||||
};
|
||||
option (resf.peridot.v1.task_info) = {
|
||||
response_type: "RpmLookasideBatchImportTask"
|
||||
metadata_type: "RpmLookasideBatchImportOperationMetadata"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message Build {
|
||||
@ -342,3 +354,26 @@ message RpmImportTask {
|
||||
message RpmImportOperationMetadata {
|
||||
string package_name = 1;
|
||||
}
|
||||
|
||||
message RpmLookasideBatchImportRequest {
|
||||
string project_id = 1;
|
||||
|
||||
// Rpms
|
||||
//
|
||||
// Previously uploaded RPM tarball
|
||||
repeated string lookaside_blobs = 2;
|
||||
|
||||
// Force override
|
||||
//
|
||||
// Overwrite existing RPMs even if NVRA is locked
|
||||
// Useful for secure boot scenarios for example
|
||||
bool force_override = 3;
|
||||
}
|
||||
|
||||
message RpmLookasideBatchImportTask {
|
||||
resf.peridot.yumrepofs.v1.UpdateRepoTask repo_changes = 1;
|
||||
}
|
||||
|
||||
message RpmLookasideBatchImportOperationMetadata {
|
||||
repeated string package_names = 1;
|
||||
}
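Together with the LookasideFileUpload RPC defined further down, the intended flow is: upload each RPM, collect the returned digests, then hand them to RpmLookasideBatchImport in one request. A rough sketch of the two request messages as they would look through the generated peridotpb package (the project id, byte slice and digest list are placeholders):

uploadReq := &peridotpb.LookasideFileUploadRequest{
	// Raw RPM bytes, base64-encoded as the API expects.
	File: base64.StdEncoding.EncodeToString(rpmBytes),
}
// ...each upload response yields a digest, collected into lookasideBlobs...
importReq := &peridotpb.RpmLookasideBatchImportRequest{
	ProjectId:      "11111111-1111-1111-1111-111111111111",
	LookasideBlobs: lookasideBlobs,
	ForceOverride:  true,
}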
|
||||
|
@ -33,7 +33,7 @@ service PackageService {
|
||||
// for a specific package
|
||||
enum PackageType {
|
||||
// Unknown value. Should never be used
|
||||
PACKAGE_TYPE_UNSPECIFIED = 0;
|
||||
PACKAGE_TYPE_DEFAULT = 0;
|
||||
|
||||
// Normal packages from downstream dist-git
|
||||
// The repos are imported as-is
|
||||
|
@ -75,6 +75,13 @@ service ProjectService {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
rpc LookasideFileUpload(LookasideFileUploadRequest) returns (LookasideFileUploadResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/lookaside"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Project is a contained RPM distribution
|
||||
@ -291,3 +298,10 @@ message CreateHashedRepositoriesRequest {
|
||||
message CreateHashedRepositoriesTask {
|
||||
repeated string repo_revisions = 1;
|
||||
}
|
||||
|
||||
message LookasideFileUploadRequest {
|
||||
string file = 1 [(validate.rules).string.min_bytes = 1];
|
||||
}
|
||||
message LookasideFileUploadResponse {
|
||||
string digest = 1;
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ service TaskService {
|
||||
}
|
||||
|
||||
enum TaskType {
|
||||
TASK_TYPE_UNSPECIFIED = 0;
|
||||
TASK_TYPE_UNKNOWN = 0;
|
||||
TASK_TYPE_IMPORT = 1;
|
||||
TASK_TYPE_IMPORT_SRC_GIT = 2;
|
||||
TASK_TYPE_IMPORT_SRC_GIT_TO_DIST_GIT = 3;
|
||||
@ -71,6 +71,8 @@ enum TaskType {
|
||||
TASK_TYPE_SYNC_CATALOG = 15;
|
||||
TASK_TYPE_RPM_IMPORT = 16;
|
||||
TASK_TYPE_CREATE_HASHED_REPOSITORIES = 17;
|
||||
TASK_TYPE_LOOKASIDE_FILE_UPLOAD = 18;
|
||||
TASK_TYPE_RPM_LOOKASIDE_BATCH_IMPORT = 19;
|
||||
}
|
||||
|
||||
enum TaskStatus {
|
||||
|
@ -78,7 +78,7 @@ func (s *Server) GetBlob(ctx context.Context, req *yumrepofspb.GetBlobRequest) (
|
||||
}
|
||||
|
||||
header := metadata.Pairs("Location", urlStr)
|
||||
err = grpc.SendHeader(ctx, header)
|
||||
err = grpc.SetHeader(ctx, header)
|
||||
if err != nil {
|
||||
s.log.Errorf("failed to send header: %v", err)
|
||||
return nil, status.Error(codes.Internal, "failed to send header")
|
||||
|
@ -79,7 +79,7 @@ func (s *Server) GetRpm(ctx context.Context, req *yumrepofspb.GetRpmRequest) (*y
|
||||
}
|
||||
|
||||
header := metadata.Pairs("Location", urlStr)
|
||||
err = grpc.SendHeader(ctx, header)
|
||||
err = grpc.SetHeader(ctx, header)
|
||||
if err != nil {
|
||||
s.log.Errorf("failed to send header: %v", err)
|
||||
return nil, status.Error(codes.Internal, "failed to send header")
|
||||
|
@ -88,9 +88,7 @@ func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.Una
|
||||
func (s *Server) Run() {
|
||||
res := utils.NewGRPCServer(
|
||||
&utils.GRPCOptions{
|
||||
ServerOptions: []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(s.interceptor),
|
||||
},
|
||||
Interceptor: s.interceptor,
|
||||
},
|
||||
func(r *utils.Register) {
|
||||
endpoints := []utils.GrpcEndpointRegister{
|
||||
|
@ -71,7 +71,7 @@ func NewServer(db db.Access) *Server {
|
||||
|
||||
func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
n := utils.EndInterceptor
|
||||
n = utils.AuthInterceptor(s.hydra, []string{}, true, n)
|
||||
n = utils.AuthInterceptor(s.hydra, nil, []string{}, true, n)
|
||||
|
||||
return n(ctx, req, usi, handler)
|
||||
}
|
||||
@ -79,9 +79,7 @@ func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.Una
|
||||
func (s *Server) Run() {
|
||||
res := utils.NewGRPCServer(
|
||||
&utils.GRPCOptions{
|
||||
ServerOptions: []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(s.interceptor),
|
||||
},
|
||||
Interceptor: s.interceptor,
|
||||
},
|
||||
func(r *utils.Register) {
|
||||
err := secparseadminpb.RegisterSecparseAdminHandlerFromEndpoint(
|
||||
|
@ -67,9 +67,7 @@ func (s *Server) interceptor(ctx context.Context, req interface{}, usi *grpc.Una
|
||||
func (s *Server) Run() {
|
||||
res := utils.NewGRPCServer(
|
||||
&utils.GRPCOptions{
|
||||
ServerOptions: []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(s.interceptor),
|
||||
},
|
||||
Interceptor: s.interceptor,
|
||||
},
|
||||
func(r *utils.Register) {
|
||||
err := secparsepb.RegisterSecparseHandlerFromEndpoint(
|
||||
|
@ -33,13 +33,14 @@ package rpmutils
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
nvr *regexp.Regexp
|
||||
nvrNoArch *regexp.Regexp
|
||||
epoch *regexp.Regexp
|
||||
module *regexp.Regexp
|
||||
dist *regexp.Regexp
|
||||
moduleDist *regexp.Regexp
|
||||
advisoryId *regexp.Regexp
|
||||
nvr *regexp.Regexp
|
||||
nvrNoArch *regexp.Regexp
|
||||
nvrUnusualRelease *regexp.Regexp
|
||||
epoch *regexp.Regexp
|
||||
module *regexp.Regexp
|
||||
dist *regexp.Regexp
|
||||
moduleDist *regexp.Regexp
|
||||
advisoryId *regexp.Regexp
|
||||
)
|
||||
|
||||
func NVR() *regexp.Regexp {
|
||||
@ -56,6 +57,13 @@ func NVRNoArch() *regexp.Regexp {
|
||||
return nvrNoArch
|
||||
}
|
||||
|
||||
func NVRUnusualRelease() *regexp.Regexp {
|
||||
if nvrUnusualRelease == nil {
|
||||
nvrUnusualRelease = regexp.MustCompile("^(\\S+)-([\\w~%.+]+)-(\\w+?)(?:\\.(\\w+))?(?:\\.rpm)?$")
|
||||
}
|
||||
return nvrUnusualRelease
|
||||
}
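A quick illustration of what the new pattern is meant to capture, using a made-up file name whose release carries no dist tag (the import path is assumed from the repository layout):

package main

import (
	"fmt"

	"peridot.resf.org/rpmutils" // import path assumed
)

func main() {
	m := rpmutils.NVRUnusualRelease().FindStringSubmatch("foo-1.0-2.x86_64.rpm")
	if m != nil {
		// name, version, release, arch
		fmt.Println(m[1], m[2], m[3], m[4]) // foo 1.0 2 x86_64
	}
}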
|
||||
|
||||
func Epoch() *regexp.Regexp {
|
||||
if epoch == nil {
|
||||
epoch = regexp.MustCompile("(\\d+):")
|
||||
|
@ -29,10 +29,14 @@ go_library(
|
||||
"//vendor/github.com/go-chi/chi/middleware",
|
||||
"//vendor/github.com/go-openapi/runtime",
|
||||
"//vendor/github.com/go-openapi/strfmt",
|
||||
"//vendor/github.com/grpc-ecosystem/go-grpc-middleware",
|
||||
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
"//vendor/github.com/jmoiron/sqlx",
|
||||
"//vendor/github.com/lib/pq",
|
||||
"//vendor/github.com/ory/hydra-client-go/client",
|
||||
"//vendor/github.com/ory/hydra-client-go/client/admin",
|
||||
"//vendor/github.com/ory/hydra-client-go/client/public",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"//vendor/github.com/sirupsen/logrus",
|
||||
"//vendor/github.com/spf13/pflag",
|
||||
"//vendor/github.com/spf13/viper",
|
||||
|
@ -32,6 +32,9 @@ package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"net"
|
||||
@ -42,6 +45,7 @@ import (
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/go-chi/chi/middleware"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
_ "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
@ -100,12 +104,14 @@ func DefaultServeMuxOption() []runtime.ServeMuxOption {
|
||||
}
|
||||
|
||||
type GRPCOptions struct {
|
||||
DialOptions []grpc.DialOption
|
||||
MuxOptions []runtime.ServeMuxOption
|
||||
ServerOptions []grpc.ServerOption
|
||||
DisableREST bool
|
||||
DisableGRPC bool
|
||||
Timeout *time.Duration
|
||||
DialOptions []grpc.DialOption
|
||||
MuxOptions []runtime.ServeMuxOption
|
||||
ServerOptions []grpc.ServerOption
|
||||
Interceptor grpc.UnaryServerInterceptor
|
||||
ServerInterceptor grpc.StreamServerInterceptor
|
||||
DisableREST bool
|
||||
DisableGRPC bool
|
||||
Timeout *time.Duration
|
||||
}
|
||||
|
||||
type Register struct {
|
||||
@ -153,7 +159,38 @@ func NewGRPCServer(goptions *GRPCOptions, endpoint func(*Register), serve func(*
|
||||
}
|
||||
}
|
||||
|
||||
serv := grpc.NewServer(options.ServerOptions...)
|
||||
opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
// use DialOptions if not nil
|
||||
if options.DialOptions != nil {
|
||||
opts = options.DialOptions
|
||||
}
|
||||
opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1000*1024*1024), grpc.MaxCallSendMsgSize(1000*1024*1024)))
|
||||
|
||||
serverOpts := options.ServerOptions
|
||||
// If the server already declares a unary interceptor, let's chain
|
||||
// and make grpc_prometheus run first
|
||||
if options.Interceptor != nil {
|
||||
serverOpts = append(serverOpts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
|
||||
grpc_prometheus.UnaryServerInterceptor,
|
||||
options.Interceptor,
|
||||
)))
|
||||
} else {
|
||||
// Else, only declare prometheus interceptor
|
||||
serverOpts = append(serverOpts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))
|
||||
}
|
||||
|
||||
// If the server already declares a stream interceptor, let's chain
|
||||
// and make grpc_prometheus run first
|
||||
if options.ServerInterceptor != nil {
|
||||
serverOpts = append(serverOpts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
|
||||
grpc_prometheus.StreamServerInterceptor,
|
||||
options.ServerInterceptor,
|
||||
)))
|
||||
} else {
|
||||
// Else, only declare prometheus interceptor
|
||||
serverOpts = append(serverOpts, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor))
|
||||
}
|
||||
serv := grpc.NewServer(serverOpts...)
|
||||
|
||||
// background context since this is the "main" app
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
@ -178,12 +215,6 @@ func NewGRPCServer(goptions *GRPCOptions, endpoint func(*Register), serve func(*
|
||||
}
|
||||
|
||||
mux := runtime.NewServeMux(muxOptions...)
|
||||
opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
// use DialOptions if not nil
|
||||
if options.DialOptions != nil {
|
||||
opts = options.DialOptions
|
||||
}
|
||||
opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1000*1024*1024), grpc.MaxCallSendMsgSize(1000*1024*1024)))
|
||||
|
||||
register := &Register{
|
||||
Context: ctx,
|
||||
@ -199,9 +230,9 @@ func NewGRPCServer(goptions *GRPCOptions, endpoint func(*Register), serve func(*
|
||||
r.Mount("/", mux)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
|
||||
if !options.DisableREST {
|
||||
wg.Add(1)
|
||||
go func(wg *sync.WaitGroup) {
|
||||
logrus.Infof("starting http server on port %s", viper.GetString("api.port"))
|
||||
|
||||
@ -215,22 +246,36 @@ func NewGRPCServer(goptions *GRPCOptions, endpoint func(*Register), serve func(*
|
||||
}
|
||||
|
||||
if !options.DisableGRPC {
|
||||
logrus.Infof("starting grpc server on port %s", viper.GetString("grpc.port"))
|
||||
registerServer := &RegisterServer{
|
||||
Server: serv,
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(wg *sync.WaitGroup) {
|
||||
logrus.Infof("starting grpc server on port %s", viper.GetString("grpc.port"))
|
||||
registerServer := &RegisterServer{
|
||||
Server: serv,
|
||||
}
|
||||
|
||||
if serve != nil {
|
||||
serve(registerServer)
|
||||
}
|
||||
if serve != nil {
|
||||
serve(registerServer)
|
||||
}
|
||||
grpc_prometheus.Register(serv)
|
||||
|
||||
err = serv.Serve(lis)
|
||||
if err != nil {
|
||||
logrus.Fatalf("failed to serve: %v", err)
|
||||
}
|
||||
wg.Done()
|
||||
err = serv.Serve(lis)
|
||||
if err != nil {
|
||||
logrus.Fatalf("failed to serve: %v", err)
|
||||
}
|
||||
wg.Done()
|
||||
}(&wg)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(wg *sync.WaitGroup) {
|
||||
promMux := http.NewServeMux()
|
||||
promMux.Handle("/metrics", promhttp.Handler())
|
||||
err := http.ListenAndServe(":7332", promMux)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not start prometheus server - %s", err)
|
||||
}
|
||||
}(&wg)
|
||||
|
||||
return &GRPCServerRes{
|
||||
Cancel: cancel,
|
||||
WaitGroup: &wg,
|
||||
|
@ -32,10 +32,13 @@ package utils

import (
"context"
"fmt"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/ory/hydra-client-go/client"
"github.com/ory/hydra-client-go/client/admin"
"github.com/ory/hydra-client-go/client/public"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
@ -69,7 +72,7 @@ func ServerEndInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamS
return handler(srv, ss)
}

func checkAuth(ctx context.Context, hydraSDK *client.OryHydra) (context.Context, error) {
func checkAuth(ctx context.Context, hydraSDK *client.OryHydra, hydraAdmin *client.OryHydra) (context.Context, error) {
// fetch metadata from grpc
meta, ok := metadata.FromIncomingContext(ctx)
if !ok {
@ -99,6 +102,25 @@ func checkAuth(ctx context.Context, hydraSDK *client.OryHydra) (context.Context,
if err != nil {
return ctx, err
}
if userInfo.Payload.Sub == "" && hydraAdmin != nil {
introspect, err := hydraAdmin.Admin.IntrospectOAuth2Token(
&admin.IntrospectOAuth2TokenParams{
Context: ctx,
Token: authToken[1],
},
)
if err != nil {
logrus.Errorf("error introspecting token: %s", err)
return ctx, status.Errorf(codes.Unauthenticated, "invalid authorization token")
}

userInfo.Payload.Sub = introspect.Payload.ClientID
userInfo.Payload.Name = introspect.Payload.Sub
userInfo.Payload.Email = fmt.Sprintf("%s@%s", introspect.Payload.Sub, "serviceaccount.resf.org")
}
if userInfo.Payload.Sub == "" {
return ctx, status.Errorf(codes.Unauthenticated, "invalid authorization token")
}

// supply subject and token to further requests
pairs := metadata.Pairs("x-user-id", userInfo.Payload.Sub, "x-user-name", userInfo.Payload.Name, "x-user-email", userInfo.Payload.Email, "x-auth-token", authToken[1])
@ -108,12 +130,12 @@ func checkAuth(ctx context.Context, hydraSDK *client.OryHydra) (context.Context,
}

// AuthInterceptor requires OAuth2 authentication for all routes except listed
func AuthInterceptor(hydraSDK *client.OryHydra, excludedMethods []string, enforce bool, next InterceptorFunc) InterceptorFunc {
func AuthInterceptor(hydraSDK *client.OryHydra, hydraAdminSDK *client.OryHydra, excludedMethods []string, enforce bool, next InterceptorFunc) InterceptorFunc {
return func(ctx context.Context, req interface{}, usi *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
// skip authentication for excluded methods
if !StrContains(usi.FullMethod, excludedMethods) {
var err error
if ctx, err = checkAuth(ctx, hydraSDK); err != nil {
if ctx, err = checkAuth(ctx, hydraSDK, hydraAdminSDK); err != nil {
if enforce {
return nil, err
}
@ -132,7 +154,7 @@ type serverStream struct {
func (ss *serverStream) Context() context.Context {
return ss.ctx
}
func ServerAuthInterceptor(hydraSDK *client.OryHydra, excludedMethods []string, enforce bool, next ServerInterceptorFunc) ServerInterceptorFunc {
func ServerAuthInterceptor(hydraSDK *client.OryHydra, hydraAdminSDK *client.OryHydra, excludedMethods []string, enforce bool, next ServerInterceptorFunc) ServerInterceptorFunc {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
newStream := serverStream{
ServerStream: ss,
@ -142,7 +164,7 @@ func ServerAuthInterceptor(hydraSDK *client.OryHydra, excludedMethods []string,
if !StrContains(info.FullMethod, excludedMethods) {
var ctx context.Context
var err error
if ctx, err = checkAuth(ss.Context(), hydraSDK); err != nil {
if ctx, err = checkAuth(ss.Context(), hydraSDK, hydraAdminSDK); err != nil {
if enforce {
return err
}
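checkAuth, AuthInterceptor, and ServerAuthInterceptor now take a second *client.OryHydra for Hydra's admin API; when the userinfo subject is empty, the token is introspected there so client-credentials (service-account) tokens get a synthetic subject and email. A hedged sketch of constructing the two clients the new signatures expect; the hosts are hypothetical and the exact interceptor wiring in peridot is not shown in this diff:

```
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	hydra "github.com/ory/hydra-client-go/client"
)

func main() {
	// Public endpoint client, used for the userinfo lookup.
	publicSDK := hydra.NewHTTPClientWithConfig(strfmt.Default, &hydra.TransportConfig{
		Host:     "hydra-public.example.internal:4444", // hypothetical host
		BasePath: "/",
		Schemes:  []string{"https"},
	})

	// Admin endpoint client, used for the new introspection fallback.
	adminSDK := hydra.NewHTTPClientWithConfig(strfmt.Default, &hydra.TransportConfig{
		Host:     "hydra-admin.example.internal:4445", // hypothetical host
		BasePath: "/",
		Schemes:  []string{"https"},
	})

	// Both clients are then passed to the updated constructors, e.g.
	// utils.AuthInterceptor(publicSDK, adminSDK, excludedMethods, true, next).
	fmt.Println(publicSDK != nil, adminSDK != nil)
}
```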
@ -102,6 +102,14 @@ func NullTimeToTimestamppb(t sql.NullTime) *timestamppb.Timestamp {
return timestamppb.New(t.Time)
}

func NullStringToPointer(s sql.NullString) *string {
if !s.Valid {
return nil
}

return &s.String
}

func Int64(i int64) *int64 {
return &i
}
@ -109,3 +117,7 @@ func Int64(i int64) *int64 {
func Bool(b bool) *bool {
return &b
}

func Pointer[T any](t T) *T {
return &t
}
@ -50,3 +50,14 @@ func IntersectString(a, b []string) []string {

return c
}

func Take[T comparable](a []T, x T) []T {
var n []T
for _, v := range a {
if v != x {
n = append(n, v)
}
}

return n
}
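The utils hunks above add a nil-safe sql.NullString conversion and two generic helpers, which rely on the Go 1.18 toolchain bump in this PR. A self-contained usage sketch; the helpers are copied locally because their real import path is not shown here:

```
package main

import (
	"database/sql"
	"fmt"
)

// Local copies of the helpers added above, so the sketch compiles on its own.
func Pointer[T any](t T) *T { return &t }

func Take[T comparable](a []T, x T) []T {
	var n []T
	for _, v := range a {
		if v != x {
			n = append(n, v)
		}
	}
	return n
}

func NullStringToPointer(s sql.NullString) *string {
	if !s.Valid {
		return nil
	}
	return &s.String
}

func main() {
	fmt.Println(*Pointer(42))                          // 42
	fmt.Println(Take([]string{"a", "b", "a"}, "a"))    // [b]: every "a" is dropped
	fmt.Println(NullStringToPointer(sql.NullString{})) // <nil> for an invalid NullString
}
```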
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
9
vendor/github.com/beorn7/perks/quantile/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,9 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "quantile",
srcs = ["stream.go"],
importmap = "peridot.resf.org/vendor/github.com/beorn7/perks/quantile",
importpath = "github.com/beorn7/perks/quantile",
visibility = ["//visibility:public"],
)
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
@ -0,0 +1,316 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
targets := make([]target, 0, len(targetMap))
|
||||
|
||||
for quantile, epsilon := range targetMap {
|
||||
t := target{
|
||||
quantile: quantile,
|
||||
epsilon: epsilon,
|
||||
}
|
||||
targets = append(targets, t)
|
||||
}
|
||||
|
||||
return targets
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
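The quantile package vendored above is a transitive dependency of prometheus/client_golang, which uses it to back summary metrics. A hedged usage sketch based only on the API visible in stream.go:

```
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 50th/90th/99th percentiles with per-quantile error bounds.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})

	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i))
	}

	fmt.Println("p50 ~", q.Query(0.50))
	fmt.Println("p99 ~", q.Query(0.99))
	fmt.Println("observations:", q.Count())
}
```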
15
vendor/github.com/cespare/xxhash/v2/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "xxhash",
|
||||
srcs = [
|
||||
"xxhash.go",
|
||||
"xxhash_amd64.go",
|
||||
"xxhash_amd64.s",
|
||||
"xxhash_other.go",
|
||||
"xxhash_unsafe.go",
|
||||
],
|
||||
importmap = "peridot.resf.org/vendor/github.com/cespare/xxhash/v2",
|
||||
importpath = "github.com/cespare/xxhash/v2",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
69
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
Normal file
@ -0,0 +1,69 @@
# xxhash

[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
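A short usage sketch of the xxhash API described in the README above; both calls hash the same bytes, so the two printed digests match:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, xxhash"))

	// Streaming hashing via Digest, which implements hash.Hash64.
	d := xxhash.New()
	_, _ = d.WriteString("hello, ")
	_, _ = d.WriteString("xxhash")
	fmt.Printf("%016x\n", d.Sum64())
}
```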
3
vendor/github.com/cespare/xxhash/v2/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
module github.com/cespare/xxhash/v2

go 1.11
0
vendor/github.com/cespare/xxhash/v2/go.sum
generated
vendored
Normal file
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total uint64
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
func New() *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// Size always returns 8 bytes.
|
||||
func (d *Digest) Size() int { return 8 }
|
||||
|
||||
// BlockSize always returns 32 bytes.
|
||||
func (d *Digest) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to d. It always returns len(b), nil.
|
||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
nw := writeBlocks(d, b)
|
||||
b = b[nw:]
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(d.mem[:], b)
|
||||
d.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the current hash.
|
||||
func (d *Digest) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if d.total >= 32 {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = d.v3 + prime5
|
||||
}
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
const (
|
||||
magic = "xxh\x06"
|
||||
marshaledSize = len(magic) + 8*5 + 32
|
||||
)
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = appendUint64(b, d.v1)
|
||||
b = appendUint64(b, d.v2)
|
||||
b = appendUint64(b, d.v3)
|
||||
b = appendUint64(b, d.v4)
|
||||
b = appendUint64(b, d.total)
|
||||
b = append(b, d.mem[:d.n]...)
|
||||
b = b[:len(b)+len(d.mem)-d.n]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("xxhash: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("xxhash: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic):]
|
||||
b, d.v1 = consumeUint64(b)
|
||||
b, d.v2 = consumeUint64(b)
|
||||
b, d.v3 = consumeUint64(b)
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendUint64(b []byte, x uint64) []byte {
|
||||
var a [8]byte
|
||||
binary.LittleEndian.PutUint64(a[:], x)
|
||||
return append(b, a[:]...)
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
x := u64(b)
|
||||
return b[8:], x
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(d *Digest, b []byte) int
|
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
@ -0,0 +1,215 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
|
||||
RET
|
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
// d.Write(b)
|
||||
// return d.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(d *Digest, b []byte) int {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
n := len(b)
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
||||
return n - len(b)
|
||||
}
|
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
return d.Write([]byte(s))
|
||||
}
|
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||
//
|
||||
// var b []byte
|
||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
// bh.Len = len(s)
|
||||
// bh.Cap = len(s)
|
||||
//
|
||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||
// weight to this sequence of expressions that any function that uses it will
|
||||
// not be inlined. Instead, the functions below use a different unsafe
|
||||
// conversion designed to minimize the inliner weight and allow both to be
|
||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||
// inlined.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
return Sum64(b)
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
||||
// d.Write always returns len(s), nil.
|
||||
// Ignoring the return output and returning these fixed values buys a
|
||||
// savings of 6 in the inliner's cost model.
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
||||
// of the first two words is the same as the layout of a string.
|
||||
type sliceHeader struct {
|
||||
s string
|
||||
cap int
|
||||
}
|
19
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
@ -36,11 +36,12 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// TODO(≥go1.18): Use any instead of interface{}.
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
}
|
||||
|
||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
v = sanitizeValue(v, f.Type().In(0))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{v})[0]
|
||||
}
|
||||
@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
}
|
||||
|
||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
||||
x = sanitizeValue(x, f.Type().In(0))
|
||||
y = sanitizeValue(y, f.Type().In(1))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
}
|
||||
@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
ret = f.Call(vs)[0]
|
||||
}
|
||||
|
||||
// sanitizeValue converts nil interfaces of type T to those of type R,
|
||||
// assuming that T is assignable to R.
|
||||
// Otherwise, it returns the input value as is.
|
||||
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
|
||||
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
|
||||
if !flags.AtLeastGo110 {
|
||||
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
|
||||
return reflect.New(t).Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
||||
var addr bool
|
||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/export_panic.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build purego
|
||||
// +build purego
|
||||
|
||||
package cmp
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/export_unsafe.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !purego
|
||||
// +build !purego
|
||||
|
||||
package cmp
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cmp_debug
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cmp_debug
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
6
vendor/github.com/google/go-cmp/cmp/internal/flags/BUILD.bazel
generated
vendored
@ -2,11 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "flags",
|
||||
srcs = [
|
||||
"flags.go",
|
||||
"toolchain_legacy.go",
|
||||
"toolchain_recent.go",
|
||||
],
|
||||
srcs = ["flags.go"],
|
||||
importmap = "peridot.resf.org/vendor/github.com/google/go-cmp/cmp/internal/flags",
|
||||
importpath = "github.com/google/go-cmp/cmp/internal/flags",
|
||||
visibility = ["//vendor/github.com/google/go-cmp/cmp:__subpackages__"],
|
||||
|
10
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
generated
vendored
@ -1,10 +0,0 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package flags
|
||||
|
||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
||||
const AtLeastGo110 = false
|
10
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
generated
vendored
@ -1,10 +0,0 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package flags
|
||||
|
||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
||||
const AtLeastGo110 = true
|
7
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
@ -9,6 +9,8 @@ import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
|
||||
// TypeString is nearly identical to reflect.Type.String,
|
||||
// but has an additional option to specify that full type names be used.
|
||||
func TypeString(t reflect.Type, qualified bool) string {
|
||||
@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte
|
||||
// of the same name and within the same package,
|
||||
// but declared within the namespace of different functions.
|
||||
|
||||
// Use the "any" alias instead of "interface{}" for better readability.
|
||||
if t == anyType {
|
||||
return append(b, "any"...)
|
||||
}
|
||||
|
||||
// Named type.
|
||||
if t.Name() != "" {
|
||||
if qualified && t.PkgPath() != "" {
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build purego
|
||||
// +build purego
|
||||
|
||||
package value
|
||||
|
1
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
generated
vendored
@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !purego
|
||||
// +build !purego
|
||||
|
||||
package value
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
@ -178,7 +178,7 @@ type structField struct {
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressible)
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
|
5
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
if v.MaxDepth == 0 {
|
||||
// As a special case, treat equal []byte as a leaf nodes.
|
||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
|
||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||
if v.MaxDepth == 0 || isEqualBytes {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
|
13
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
@ -207,10 +207,11 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) }
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
return opts.WithTypeMode(emitType).FormatType(t, out)
|
||||
skipType = true
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
@ -281,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
// Skip the name only if this is an unnamed pointer type.
|
||||
// Otherwise taking the address of a value does not reproduce
|
||||
// the named pointer type.
|
||||
if v.Type().Name() == "" {
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
}
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
@ -292,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
skipType = true // Print the concrete type instead
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
|
6
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 64
|
||||
const minLength = 32
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ {
|
||||
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ {
|
||||
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
|
201
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
#vendor
|
||||
vendor/
|
||||
|
||||
# Created by .ignore support plugin (hsz.mobi)
|
||||
coverage.txt
|
||||
### Go template
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
### Windows template
|
||||
# Windows image file caches
|
||||
Thumbs.db
|
||||
ehthumbs.db
|
||||
|
||||
# Folder config file
|
||||
Desktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
### Kate template
|
||||
# Swap Files #
|
||||
.*.kate-swp
|
||||
.swp.*
|
||||
### SublimeText template
|
||||
# cache files for sublime text
|
||||
*.tmlanguage.cache
|
||||
*.tmPreferences.cache
|
||||
*.stTheme.cache
|
||||
|
||||
# workspace files are user-specific
|
||||
*.sublime-workspace
|
||||
|
||||
# project files should be checked into the repository, unless a significant
|
||||
# proportion of contributors will probably not be using SublimeText
|
||||
# *.sublime-project
|
||||
|
||||
# sftp configuration file
|
||||
sftp-config.json
|
||||
### Linux template
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
### JetBrains template
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff:
|
||||
.idea
|
||||
.idea/tasks.xml
|
||||
.idea/dictionaries
|
||||
.idea/vcs.xml
|
||||
.idea/jsLibraryMappings.xml
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/dataSources.ids
|
||||
.idea/dataSources.xml
|
||||
.idea/dataSources.local.xml
|
||||
.idea/sqlDataSources.xml
|
||||
.idea/dynamic.xml
|
||||
.idea/uiDesigner.xml
|
||||
|
||||
# Gradle:
|
||||
.idea/gradle.xml
|
||||
.idea/libraries
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
### Xcode template
|
||||
# Xcode
|
||||
#
|
||||
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
|
||||
|
||||
## Build generated
|
||||
build/
|
||||
DerivedData/
|
||||
|
||||
## Various settings
|
||||
*.pbxuser
|
||||
!default.pbxuser
|
||||
*.mode1v3
|
||||
!default.mode1v3
|
||||
*.mode2v3
|
||||
!default.mode2v3
|
||||
*.perspectivev3
|
||||
!default.perspectivev3
|
||||
xcuserdata/
|
||||
|
||||
## Other
|
||||
*.moved-aside
|
||||
*.xccheckout
|
||||
*.xcscmblueprint
|
||||
### Eclipse template
|
||||
|
||||
.metadata
|
||||
bin/
|
||||
tmp/
|
||||
*.tmp
|
||||
*.bak
|
||||
*.swp
|
||||
*~.nib
|
||||
local.properties
|
||||
.settings/
|
||||
.loadpath
|
||||
.recommenders
|
||||
|
||||
# Eclipse Core
|
||||
.project
|
||||
|
||||
# External tool builders
|
||||
.externalToolBuilders/
|
||||
|
||||
# Locally stored "Eclipse launch configurations"
|
||||
*.launch
|
||||
|
||||
# PyDev specific (Python IDE for Eclipse)
|
||||
*.pydevproject
|
||||
|
||||
# CDT-specific (C/C++ Development Tooling)
|
||||
.cproject
|
||||
|
||||
# JDT-specific (Eclipse Java Development Tools)
|
||||
.classpath
|
||||
|
||||
# Java annotation processor (APT)
|
||||
.factorypath
|
||||
|
||||
# PDT-specific (PHP Development Tools)
|
||||
.buildpath
|
||||
|
||||
# sbteclipse plugin
|
||||
.target
|
||||
|
||||
# Tern plugin
|
||||
.tern-project
|
||||
|
||||
# TeXlipse plugin
|
||||
.texlipse
|
||||
|
||||
# STS (Spring Tool Suite)
|
||||
.springBeans
|
||||
|
||||
# Code Recommenders
|
||||
.recommenders/
|
||||
|
25
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
sudo: false
|
||||
language: go
|
||||
# * github.com/grpc/grpc-go still supports go1.6
|
||||
# - When we drop support for go1.6 we can remove golang.org/x/net/context
|
||||
# below as it is part of the Go std library since go1.7
|
||||
# * github.com/prometheus/client_golang already requires at least go1.7 since
|
||||
# September 2017
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- master
|
||||
|
||||
install:
|
||||
- go get github.com/prometheus/client_golang/prometheus
|
||||
- go get google.golang.org/grpc
|
||||
- go get golang.org/x/net/context
|
||||
- go get github.com/stretchr/testify
|
||||
script:
|
||||
- make test
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
25
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go-grpc-prometheus",
|
||||
srcs = [
|
||||
"client.go",
|
||||
"client_metrics.go",
|
||||
"client_reporter.go",
|
||||
"metric_options.go",
|
||||
"server.go",
|
||||
"server_metrics.go",
|
||||
"server_reporter.go",
|
||||
"util.go",
|
||||
],
|
||||
importmap = "peridot.resf.org/vendor/github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_x_net//context",
|
||||
],
|
||||
)
|
24
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
||||
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04
|
||||
|
||||
### Added
|
||||
|
||||
* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
|
||||
* Support non-default/global Prometheus registry.
|
||||
* Allow configuring counters with `prometheus.CounterOpts`.
|
||||
|
||||
### Changed
|
||||
|
||||
* Remove usage of deprecated `grpc.Code()`.
|
||||
* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.
|
||||
|
||||
---
|
||||
|
||||
This changelog was started with version `v1.2.0`; for earlier versions, refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
|
201
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
247
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
generated
vendored
Normal file
@ -0,0 +1,247 @@
|
||||
# Go gRPC Interceptors for Prometheus monitoring
|
||||
|
||||
[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
|
||||
[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
|
||||
[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
|
||||
[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
|
||||
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
|
||||
|
||||
[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
|
||||
|
||||
A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
|
||||
|
||||
## Interceptors
|
||||
|
||||
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
|
||||
by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement
|
||||
common patterns: auth, logging and... monitoring.
|
||||
|
||||
To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
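A minimal chaining sketch (not from this vendored README, and assuming a recent grpc-go that provides `grpc.ChainUnaryInterceptor`/`grpc.ChainStreamInterceptor`); the auth and logging interceptors are hypothetical placeholders:

```go
package chaining

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

// newServer wires the Prometheus interceptors into a chain with other
// (hypothetical) interceptors; interceptors run in the order listed.
func newServer(auth, logging grpc.UnaryServerInterceptor) *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			grpc_prometheus.UnaryServerInterceptor,
			auth,
			logging,
		),
		grpc.ChainStreamInterceptor(
			grpc_prometheus.StreamServerInterceptor,
		),
	)
}
```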
|
||||
|
||||
## Usage
|
||||
|
||||
There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
|
||||
|
||||
### Server-side
|
||||
|
||||
```go
|
||||
import "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
...
|
||||
// Initialize your gRPC server's interceptor.
|
||||
myServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
|
||||
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
|
||||
)
|
||||
// Register your gRPC service implementations.
|
||||
myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
|
||||
// After all your registrations, make sure all of the Prometheus metrics are initialized.
|
||||
grpc_prometheus.Register(myServer)
|
||||
// Register Prometheus metrics handler.
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
...
|
||||
```
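If the default global registry is not desirable, the metrics object can be registered on a dedicated registry instead; a rough sketch using the `NewServerMetrics`, `InitializeMetrics`, and interceptor methods shipped in this package (the registry and HTTP wiring, including the `:9092` port, are our own illustration):

```go
package main

import (
	"log"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// Use a dedicated registry instead of the package-level default one.
	reg := prometheus.NewRegistry()
	serverMetrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(serverMetrics) // ServerMetrics implements prometheus.Collector.

	grpcServer := grpc.NewServer(
		grpc.StreamInterceptor(serverMetrics.StreamServerInterceptor()),
		grpc.UnaryInterceptor(serverMetrics.UnaryServerInterceptor()),
	)
	// Register your gRPC service implementations here, then pre-initialize
	// the per-method counters so they show up with zero values.
	serverMetrics.InitializeMetrics(grpcServer)

	// Expose only this registry's metrics; serving the gRPC listener is elided.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9092", nil))
}
```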
|
||||
|
||||
### Client-side
|
||||
|
||||
```go
|
||||
import "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
...
|
||||
clientConn, err = grpc.Dial(
|
||||
address,
|
||||
grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
|
||||
grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
|
||||
)
|
||||
client = pb_testproto.NewTestServiceClient(clientConn)
|
||||
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
|
||||
...
|
||||
```
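Client-side handling-time histograms are likewise opt-in; a small illustrative sketch (not from the upstream README) using `EnableClientHandlingTimeHistogram` from this package's `client.go`:

```go
package main

import grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"

func main() {
	// Opt in to client-side latency histograms; this acts on DefaultClientMetrics
	// and registers grpc_client_handling_seconds on the default Prometheus registry.
	grpc_prometheus.EnableClientHandlingTimeHistogram()

	// ... dial your client connection with the interceptors shown above ...
}
```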
|
||||
|
||||
# Metrics
|
||||
|
||||
## Labels
|
||||
|
||||
All server-side metrics start with `grpc_server` as the Prometheus subsystem name, and all client-side metrics start with `grpc_client`; the two mirror each other. All methods
|
||||
contain the same rich labels:
|
||||
|
||||
* `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
|
||||
the `service` section name. E.g. for `package = mwitkow.testproto` and
|
||||
`service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
|
||||
* `grpc_method` - the name of the method called on the gRPC service. E.g.
|
||||
`grpc_method="Ping"`
|
||||
* `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
|
||||
Differentiating between these is important, especially for latency measurements.
|
||||
|
||||
- `unary` is single request, single response RPC
|
||||
- `client_stream` is a multi-request, single response RPC
|
||||
- `server_stream` is a single request, multi-response RPC
|
||||
- `bidi_stream` is a multi-request, multi-response RPC
|
||||
|
||||
|
||||
Additionally for completed RPCs, the following labels are used:
|
||||
|
||||
* `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
|
||||
The list of all statuses is too long to include here, but some common ones are:
|
||||
|
||||
- `OK` - means the RPC was successful
|
||||
- `InvalidArgument` - RPC contained bad values
|
||||
- `Internal` - server-side error not disclosed to the clients
|
||||
|
||||
## Counters
|
||||
|
||||
The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
the values are exposed through the respective Prometheus handler (usually `/metrics`).
|
||||
|
||||
For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts.
|
||||
|
||||
For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
|
||||
calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
|
||||
|
||||
First, immediately after the server receives the call it will increment the
|
||||
`grpc_server_started_total` and start the handling time clock (if histograms are enabled).
|
||||
|
||||
```jsoniq
|
||||
grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||
```
|
||||
|
||||
Then the user logic gets invoked. It receives one message from the client containing the request
|
||||
(it's a `server_stream`):
|
||||
|
||||
```jsoniq
|
||||
grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||
```
|
||||
|
||||
The user logic may return an error, or send multiple messages back to the client. In this case, on
|
||||
each of the 20 messages sent back, a counter will be incremented:
|
||||
|
||||
```jsoniq
|
||||
grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
|
||||
```
|
||||
|
||||
After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
|
||||
and the relevant call labels increment the `grpc_server_handled_total` counter.
|
||||
|
||||
```jsoniq
|
||||
grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||
```
|
||||
|
||||
## Histograms
|
||||
|
||||
[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
|
||||
to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
|
||||
of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
the latency monitoring metrics are disabled by default. To enable them, call the following
|
||||
in your server initialization code:
|
||||
|
||||
```go
|
||||
grpc_prometheus.EnableHandlingTimeHistogram()
|
||||
```
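The default `prometheus.DefBuckets` can be overridden at enable time through the `WithHistogramBuckets` option defined in `metric_options.go`; the bucket boundaries below are illustrative only:

```go
// Illustrative only: override the default buckets (prometheus.DefBuckets)
// when enabling the server-side handling-time histogram.
grpc_prometheus.EnableHandlingTimeHistogram(
	grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.5, 1, 5}),
)
```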
|
||||
|
||||
After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
|
||||
variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
|
||||
|
||||
* `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
|
||||
* `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
|
||||
calculating average handling times
|
||||
* `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
|
||||
handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
|
||||
|
||||
The counter values will look as follows:
|
||||
|
||||
```jsoniq
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
|
||||
grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
|
||||
grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
|
||||
grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
|
||||
```
|
||||
|
||||
|
||||
## Useful query examples
|
||||
|
||||
The Prometheus philosophy is to provide raw metrics to the monitoring system and let the aggregations
be handled there. The verbosity of the above metrics makes that flexibility possible. Here are a couple
of useful monitoring queries:
|
||||
|
||||
|
||||
### request inbound rate
|
||||
```jsoniq
|
||||
sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
|
||||
```
|
||||
For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
|
||||
rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
|
||||
how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
|
||||
|
||||
### unary request error rate
|
||||
```jsoniq
|
||||
sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
|
||||
```
|
||||
For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the
|
||||
ones that didn't finish with `OK` code.
|
||||
|
||||
### unary request error percentage
|
||||
```jsoniq
|
||||
sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
|
||||
/
|
||||
sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
|
||||
* 100.0
|
||||
```
|
||||
For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
|
||||
this is a combination of the two above examples. This is an example of a query you would like to
|
||||
[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
|
||||
"no more than 1% requests should fail".
|
||||
|
||||
### average response stream size
|
||||
```jsoniq
|
||||
sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
|
||||
/
|
||||
sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
|
||||
```
|
||||
For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
|
||||
server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows
|
||||
you to track when clients started to send "wide" queries that ret
|
||||
Note the divisor is the number of started RPCs, in order to account for in-flight requests.
|
||||
|
||||
### 99%-tile latency of unary requests
|
||||
```jsoniq
|
||||
histogram_quantile(0.99,
|
||||
sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
|
||||
)
|
||||
```
|
||||
For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
|
||||
of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile
|
||||
estimation will take samples in a rolling `5m` window. When combined with other quantiles
|
||||
(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system
|
||||
(e.g. impact of caching).
|
||||
|
||||
### percentage of slow unary queries (>250ms)
|
||||
```jsoniq
|
||||
100.0 - (
|
||||
sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
|
||||
/
|
||||
sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
|
||||
) * 100.0
|
||||
```
|
||||
For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25`
|
||||
seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
|
||||
buckets, meaning that counting the fraction of "fast" requests is easier. However, simple maths helps.
|
||||
This is an example of a query you would like to alert on in your system for SLA violations,
|
||||
e.g. "less than 1% of requests are slower than 250ms".
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io).
|
||||
|
||||
## License
|
||||
|
||||
`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
39
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
// gRPC Prometheus monitoring interceptors for client-side gRPC.
|
||||
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
prom "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultClientMetrics is the default instance of ClientMetrics. It is
|
||||
// intended to be used in conjunction the default Prometheus metrics
|
||||
// registry.
|
||||
DefaultClientMetrics = NewClientMetrics()
|
||||
|
||||
// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
|
||||
UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
|
||||
|
||||
// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
|
||||
StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
|
||||
)
|
||||
|
||||
func init() {
|
||||
prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
|
||||
prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
|
||||
prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
|
||||
prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
|
||||
}
|
||||
|
||||
// EnableClientHandlingTimeHistogram turns on recording of handling time of
|
||||
// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
|
||||
// query. This function acts on the DefaultClientMetrics variable and the
|
||||
// default Prometheus metrics registry.
|
||||
func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
|
||||
DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
|
||||
prom.Register(DefaultClientMetrics.clientHandledHistogram)
|
||||
}
|
170
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
prom "github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ClientMetrics represents a collection of metrics to be registered on a
|
||||
// Prometheus metrics registry for a gRPC client.
|
||||
type ClientMetrics struct {
|
||||
clientStartedCounter *prom.CounterVec
|
||||
clientHandledCounter *prom.CounterVec
|
||||
clientStreamMsgReceived *prom.CounterVec
|
||||
clientStreamMsgSent *prom.CounterVec
|
||||
clientHandledHistogramEnabled bool
|
||||
clientHandledHistogramOpts prom.HistogramOpts
|
||||
clientHandledHistogram *prom.HistogramVec
|
||||
}
|
||||
|
||||
// NewClientMetrics returns a ClientMetrics object. Use a new instance of
|
||||
// ClientMetrics when not using the default Prometheus metrics registry, for
|
||||
// example when wanting to control which metrics are added to a registry as
|
||||
// opposed to automatically adding metrics via init functions.
|
||||
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
|
||||
opts := counterOptions(counterOpts)
|
||||
return &ClientMetrics{
|
||||
clientStartedCounter: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_client_started_total",
|
||||
Help: "Total number of RPCs started on the client.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
|
||||
clientHandledCounter: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_client_handled_total",
|
||||
Help: "Total number of RPCs completed by the client, regardless of success or failure.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
|
||||
|
||||
clientStreamMsgReceived: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_client_msg_received_total",
|
||||
Help: "Total number of RPC stream messages received by the client.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
|
||||
clientStreamMsgSent: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_client_msg_sent_total",
|
||||
Help: "Total number of gRPC stream messages sent by the client.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
|
||||
clientHandledHistogramEnabled: false,
|
||||
clientHandledHistogramOpts: prom.HistogramOpts{
|
||||
Name: "grpc_client_handling_seconds",
|
||||
Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
|
||||
Buckets: prom.DefBuckets,
|
||||
},
|
||||
clientHandledHistogram: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe sends the super-set of all possible descriptors of metrics
|
||||
// collected by this Collector to the provided channel and returns once
|
||||
// the last descriptor has been sent.
|
||||
func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
|
||||
m.clientStartedCounter.Describe(ch)
|
||||
m.clientHandledCounter.Describe(ch)
|
||||
m.clientStreamMsgReceived.Describe(ch)
|
||||
m.clientStreamMsgSent.Describe(ch)
|
||||
if m.clientHandledHistogramEnabled {
|
||||
m.clientHandledHistogram.Describe(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent.
|
||||
func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
|
||||
m.clientStartedCounter.Collect(ch)
|
||||
m.clientHandledCounter.Collect(ch)
|
||||
m.clientStreamMsgReceived.Collect(ch)
|
||||
m.clientStreamMsgSent.Collect(ch)
|
||||
if m.clientHandledHistogramEnabled {
|
||||
m.clientHandledHistogram.Collect(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
|
||||
// Histogram metrics can be very expensive for Prometheus to retain and query.
|
||||
func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
|
||||
for _, o := range opts {
|
||||
o(&m.clientHandledHistogramOpts)
|
||||
}
|
||||
if !m.clientHandledHistogramEnabled {
|
||||
m.clientHandledHistogram = prom.NewHistogramVec(
|
||||
m.clientHandledHistogramOpts,
|
||||
[]string{"grpc_type", "grpc_service", "grpc_method"},
|
||||
)
|
||||
}
|
||||
m.clientHandledHistogramEnabled = true
|
||||
}
|
||||
|
||||
// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
|
||||
func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
monitor := newClientReporter(m, Unary, method)
|
||||
monitor.SentMessage()
|
||||
err := invoker(ctx, method, req, reply, cc, opts...)
|
||||
if err != nil {
|
||||
monitor.ReceivedMessage()
|
||||
}
|
||||
st, _ := status.FromError(err)
|
||||
monitor.Handled(st.Code())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
|
||||
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
monitor := newClientReporter(m, clientStreamType(desc), method)
|
||||
clientStream, err := streamer(ctx, desc, cc, method, opts...)
|
||||
if err != nil {
|
||||
st, _ := status.FromError(err)
|
||||
monitor.Handled(st.Code())
|
||||
return nil, err
|
||||
}
|
||||
return &monitoredClientStream{clientStream, monitor}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func clientStreamType(desc *grpc.StreamDesc) grpcType {
|
||||
if desc.ClientStreams && !desc.ServerStreams {
|
||||
return ClientStream
|
||||
} else if !desc.ClientStreams && desc.ServerStreams {
|
||||
return ServerStream
|
||||
}
|
||||
return BidiStream
|
||||
}
|
||||
|
||||
// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
|
||||
type monitoredClientStream struct {
|
||||
grpc.ClientStream
|
||||
monitor *clientReporter
|
||||
}
|
||||
|
||||
func (s *monitoredClientStream) SendMsg(m interface{}) error {
|
||||
err := s.ClientStream.SendMsg(m)
|
||||
if err == nil {
|
||||
s.monitor.SentMessage()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *monitoredClientStream) RecvMsg(m interface{}) error {
|
||||
err := s.ClientStream.RecvMsg(m)
|
||||
if err == nil {
|
||||
s.monitor.ReceivedMessage()
|
||||
} else if err == io.EOF {
|
||||
s.monitor.Handled(codes.OK)
|
||||
} else {
|
||||
st, _ := status.FromError(err)
|
||||
s.monitor.Handled(st.Code())
|
||||
}
|
||||
return err
|
||||
}
|
46
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
type clientReporter struct {
|
||||
metrics *ClientMetrics
|
||||
rpcType grpcType
|
||||
serviceName string
|
||||
methodName string
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
|
||||
r := &clientReporter{
|
||||
metrics: m,
|
||||
rpcType: rpcType,
|
||||
}
|
||||
if r.metrics.clientHandledHistogramEnabled {
|
||||
r.startTime = time.Now()
|
||||
}
|
||||
r.serviceName, r.methodName = splitMethodName(fullMethod)
|
||||
r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *clientReporter) ReceivedMessage() {
|
||||
r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
}
|
||||
|
||||
func (r *clientReporter) SentMessage() {
|
||||
r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
}
|
||||
|
||||
func (r *clientReporter) Handled(code codes.Code) {
|
||||
r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
|
||||
if r.metrics.clientHandledHistogramEnabled {
|
||||
r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
|
||||
}
|
||||
}
|
16
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
SHELL="/bin/bash"
|
||||
|
||||
GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
|
||||
|
||||
all: vet fmt test
|
||||
|
||||
fmt:
|
||||
go fmt $(GOFILES_NOVENDOR)
|
||||
|
||||
vet:
|
||||
go vet $(GOFILES_NOVENDOR)
|
||||
|
||||
test: vet
|
||||
./scripts/test_all.sh
|
||||
|
||||
.PHONY: all vet test
|
41
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
prom "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// A CounterOption lets you add options to Counter metrics using With* funcs.
|
||||
type CounterOption func(*prom.CounterOpts)
|
||||
|
||||
type counterOptions []CounterOption
|
||||
|
||||
func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
|
||||
for _, f := range co {
|
||||
f(&o)
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// WithConstLabels allows you to add ConstLabels to Counter metrics.
|
||||
func WithConstLabels(labels prom.Labels) CounterOption {
|
||||
return func(o *prom.CounterOpts) {
|
||||
o.ConstLabels = labels
|
||||
}
|
||||
}
|
||||
|
||||
// A HistogramOption lets you add options to Histogram metrics using With*
|
||||
// funcs.
|
||||
type HistogramOption func(*prom.HistogramOpts)
|
||||
|
||||
// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
|
||||
func WithHistogramBuckets(buckets []float64) HistogramOption {
|
||||
return func(o *prom.HistogramOpts) { o.Buckets = buckets }
|
||||
}
|
||||
|
||||
// WithHistogramConstLabels allows you to add custom ConstLabels to
|
||||
// histograms metrics.
|
||||
func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
|
||||
return func(o *prom.HistogramOpts) {
|
||||
o.ConstLabels = labels
|
||||
}
|
||||
}
|
48
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
// gRPC Prometheus monitoring interceptors for server-side gRPC.
|
||||
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
prom "github.com/prometheus/client_golang/prometheus"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultServerMetrics is the default instance of ServerMetrics. It is
|
||||
// intended to be used in conjunction the default Prometheus metrics
|
||||
// registry.
|
||||
DefaultServerMetrics = NewServerMetrics()
|
||||
|
||||
// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
|
||||
UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
|
||||
|
||||
// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
|
||||
StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
|
||||
)
|
||||
|
||||
func init() {
|
||||
prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
|
||||
prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
|
||||
prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
|
||||
prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
|
||||
}
|
||||
|
||||
// Register takes a gRPC server and pre-initializes all counters to 0. This
|
||||
// allows for easier monitoring in Prometheus (no missing metrics), and should
|
||||
// be called *after* all services have been registered with the server. This
|
||||
// function acts on the DefaultServerMetrics variable.
|
||||
func Register(server *grpc.Server) {
|
||||
DefaultServerMetrics.InitializeMetrics(server)
|
||||
}
|
||||
|
||||
// EnableHandlingTimeHistogram turns on recording of handling time
|
||||
// of RPCs. Histogram metrics can be very expensive for Prometheus
|
||||
// to retain and query. This function acts on the DefaultServerMetrics
|
||||
// variable and the default Prometheus metrics registry.
|
||||
func EnableHandlingTimeHistogram(opts ...HistogramOption) {
|
||||
DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
|
||||
prom.Register(DefaultServerMetrics.serverHandledHistogram)
|
||||
}
|
185
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
prom "github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ServerMetrics represents a collection of metrics to be registered on a
|
||||
// Prometheus metrics registry for a gRPC server.
|
||||
type ServerMetrics struct {
|
||||
serverStartedCounter *prom.CounterVec
|
||||
serverHandledCounter *prom.CounterVec
|
||||
serverStreamMsgReceived *prom.CounterVec
|
||||
serverStreamMsgSent *prom.CounterVec
|
||||
serverHandledHistogramEnabled bool
|
||||
serverHandledHistogramOpts prom.HistogramOpts
|
||||
serverHandledHistogram *prom.HistogramVec
|
||||
}
|
||||
|
||||
// NewServerMetrics returns a ServerMetrics object. Use a new instance of
|
||||
// ServerMetrics when not using the default Prometheus metrics registry, for
|
||||
// example when wanting to control which metrics are added to a registry as
|
||||
// opposed to automatically adding metrics via init functions.
|
||||
func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
|
||||
opts := counterOptions(counterOpts)
|
||||
return &ServerMetrics{
|
||||
serverStartedCounter: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_server_started_total",
|
||||
Help: "Total number of RPCs started on the server.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
serverHandledCounter: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_server_handled_total",
|
||||
Help: "Total number of RPCs completed on the server, regardless of success or failure.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
|
||||
serverStreamMsgReceived: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_server_msg_received_total",
|
||||
Help: "Total number of RPC stream messages received on the server.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
serverStreamMsgSent: prom.NewCounterVec(
|
||||
opts.apply(prom.CounterOpts{
|
||||
Name: "grpc_server_msg_sent_total",
|
||||
Help: "Total number of gRPC stream messages sent by the server.",
|
||||
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
|
||||
serverHandledHistogramEnabled: false,
|
||||
serverHandledHistogramOpts: prom.HistogramOpts{
|
||||
Name: "grpc_server_handling_seconds",
|
||||
Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
|
||||
Buckets: prom.DefBuckets,
|
||||
},
|
||||
serverHandledHistogram: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// EnableHandlingTimeHistogram enables histograms being registered when
|
||||
// registering the ServerMetrics on a Prometheus registry. Histograms can be
|
||||
// expensive on Prometheus servers. It takes options to configure histogram
|
||||
// options such as the defined buckets.
|
||||
func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
|
||||
for _, o := range opts {
|
||||
o(&m.serverHandledHistogramOpts)
|
||||
}
|
||||
if !m.serverHandledHistogramEnabled {
|
||||
m.serverHandledHistogram = prom.NewHistogramVec(
|
||||
m.serverHandledHistogramOpts,
|
||||
[]string{"grpc_type", "grpc_service", "grpc_method"},
|
||||
)
|
||||
}
|
||||
m.serverHandledHistogramEnabled = true
|
||||
}
|
||||
|
||||
// Describe sends the super-set of all possible descriptors of metrics
|
||||
// collected by this Collector to the provided channel and returns once
|
||||
// the last descriptor has been sent.
|
||||
func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
|
||||
m.serverStartedCounter.Describe(ch)
|
||||
m.serverHandledCounter.Describe(ch)
|
||||
m.serverStreamMsgReceived.Describe(ch)
|
||||
m.serverStreamMsgSent.Describe(ch)
|
||||
if m.serverHandledHistogramEnabled {
|
||||
m.serverHandledHistogram.Describe(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent.
|
||||
func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
|
||||
m.serverStartedCounter.Collect(ch)
|
||||
m.serverHandledCounter.Collect(ch)
|
||||
m.serverStreamMsgReceived.Collect(ch)
|
||||
m.serverStreamMsgSent.Collect(ch)
|
||||
if m.serverHandledHistogramEnabled {
|
||||
m.serverHandledHistogram.Collect(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
|
||||
func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
monitor := newServerReporter(m, Unary, info.FullMethod)
|
||||
monitor.ReceivedMessage()
|
||||
resp, err := handler(ctx, req)
|
||||
st, _ := status.FromError(err)
|
||||
monitor.Handled(st.Code())
|
||||
if err == nil {
|
||||
monitor.SentMessage()
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
|
||||
func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
|
||||
err := handler(srv, &monitoredServerStream{ss, monitor})
|
||||
st, _ := status.FromError(err)
|
||||
monitor.Handled(st.Code())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// InitializeMetrics initializes all metrics, with their appropriate null
|
||||
// value, for all gRPC methods registered on a gRPC server. This is useful, to
|
||||
// ensure that all metrics exist when collecting and querying.
|
||||
func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
|
||||
serviceInfo := server.GetServiceInfo()
|
||||
for serviceName, info := range serviceInfo {
|
||||
for _, mInfo := range info.Methods {
|
||||
preRegisterMethod(m, serviceName, &mInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func streamRPCType(info *grpc.StreamServerInfo) grpcType {
|
||||
if info.IsClientStream && !info.IsServerStream {
|
||||
return ClientStream
|
||||
} else if !info.IsClientStream && info.IsServerStream {
|
||||
return ServerStream
|
||||
}
|
||||
return BidiStream
|
||||
}
|
||||
|
||||
// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
|
||||
type monitoredServerStream struct {
|
||||
grpc.ServerStream
|
||||
monitor *serverReporter
|
||||
}
|
||||
|
||||
func (s *monitoredServerStream) SendMsg(m interface{}) error {
|
||||
err := s.ServerStream.SendMsg(m)
|
||||
if err == nil {
|
||||
s.monitor.SentMessage()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *monitoredServerStream) RecvMsg(m interface{}) error {
|
||||
err := s.ServerStream.RecvMsg(m)
|
||||
if err == nil {
|
||||
s.monitor.ReceivedMessage()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
|
||||
func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
|
||||
methodName := mInfo.Name
|
||||
methodType := string(typeFromMethodInfo(mInfo))
|
||||
// These are just references (no increments), as just referencing will create the labels but not set values.
|
||||
metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
|
||||
metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
|
||||
metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
|
||||
if metrics.serverHandledHistogramEnabled {
|
||||
metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
|
||||
}
|
||||
for _, code := range allCodes {
|
||||
metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
|
||||
}
|
||||
}
|
46
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
type serverReporter struct {
|
||||
metrics *ServerMetrics
|
||||
rpcType grpcType
|
||||
serviceName string
|
||||
methodName string
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
|
||||
r := &serverReporter{
|
||||
metrics: m,
|
||||
rpcType: rpcType,
|
||||
}
|
||||
if r.metrics.serverHandledHistogramEnabled {
|
||||
r.startTime = time.Now()
|
||||
}
|
||||
r.serviceName, r.methodName = splitMethodName(fullMethod)
|
||||
r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *serverReporter) ReceivedMessage() {
|
||||
r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
}
|
||||
|
||||
func (r *serverReporter) SentMessage() {
|
||||
r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
|
||||
}
|
||||
|
||||
func (r *serverReporter) Handled(code codes.Code) {
|
||||
r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
|
||||
if r.metrics.serverHandledHistogramEnabled {
|
||||
r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
|
||||
}
|
||||
}
|
50
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
package grpc_prometheus
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
type grpcType string
|
||||
|
||||
const (
|
||||
Unary grpcType = "unary"
|
||||
ClientStream grpcType = "client_stream"
|
||||
ServerStream grpcType = "server_stream"
|
||||
BidiStream grpcType = "bidi_stream"
|
||||
)
|
||||
|
||||
var (
|
||||
allCodes = []codes.Code{
|
||||
codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
|
||||
codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
|
||||
codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
|
||||
codes.Unavailable, codes.DataLoss,
|
||||
}
|
||||
)
|
||||
|
||||
func splitMethodName(fullMethodName string) (string, string) {
|
||||
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
|
||||
if i := strings.Index(fullMethodName, "/"); i >= 0 {
|
||||
return fullMethodName[:i], fullMethodName[i+1:]
|
||||
}
|
||||
return "unknown", "unknown"
|
||||
}
|
||||
|
||||
func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
|
||||
if !mInfo.IsClientStream && !mInfo.IsServerStream {
|
||||
return Unary
|
||||
}
|
||||
if mInfo.IsClientStream && !mInfo.IsServerStream {
|
||||
return ClientStream
|
||||
}
|
||||
if !mInfo.IsClientStream && mInfo.IsServerStream {
|
||||
return ServerStream
|
||||
}
|
||||
return BidiStream
|
||||
}
|
2
vendor/github.com/json-iterator/go/README.md
generated
vendored
@ -8,8 +8,6 @@
|
||||
|
||||
A high-performance 100% compatible drop-in replacement of "encoding/json"
|
||||
|
||||
You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
|
||||
|
||||
# Benchmark
|
||||
|
||||
![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
|
||||
|
2
vendor/github.com/json-iterator/go/go.mod
generated
vendored
@ -6,6 +6,6 @@ require (
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/google/gofuzz v1.0.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/stretchr/testify v1.3.0
|
||||
)
|
||||
|
5
vendor/github.com/json-iterator/go/go.sum
generated
vendored
@ -5,11 +5,10 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
|
Some files were not shown because too many files have changed in this diff