Initial Helm support

Mustafa Gezen 2022-10-31 03:23:40 +01:00
parent 08e6055195
commit 6bc4ea866c
Signed by untrusted user who does not match committer: mustafa
GPG Key ID: DCDF010D946438C1
25 changed files with 782 additions and 284 deletions

View File

@@ -179,9 +179,9 @@ go_third_party()
 # --start jsonnet--
 http_archive(
     name = "io_bazel_rules_jsonnet",
-    sha256 = "d20270872ba8d4c108edecc9581e2bb7f320afab71f8caa2f6394b5202e8a2c3",
-    strip_prefix = "rules_jsonnet-0.4.0",
-    urls = ["https://github.com/bazelbuild/rules_jsonnet/archive/0.4.0.tar.gz"],
+    sha256 = "fa791a38167a198a8b42bfc750ee5642f811ab20649c5517e12719e78d9a133f",
+    strip_prefix = "rules_jsonnet-bd79290c53329db8bc8e3c5b709fbf822d865046",
+    urls = ["https://github.com/bazelbuild/rules_jsonnet/archive/bd79290c53329db8bc8e3c5b709fbf822d865046.tar.gz"],
 )

 load("@io_bazel_rules_jsonnet//jsonnet:jsonnet.bzl", "jsonnet_repositories")
@@ -192,9 +192,12 @@ load("@google_jsonnet_go//bazel:repositories.bzl", "jsonnet_go_repositories")
 jsonnet_go_repositories()

-load("@google_jsonnet_go//bazel:deps.bzl", "jsonnet_go_dependencies")
-
-jsonnet_go_dependencies()
+http_archive(
+    name = "cpp_jsonnet",
+    sha256 = "cbbdddc82c0090881aeff0334b6d60578a15b6fafdb0ac54974840d2b7167d88",
+    strip_prefix = "jsonnet-60bcf7f097ce7ec2d40ea2b4d0217d1e325f4769",
+    urls = ["https://github.com/google/jsonnet/archive/60bcf7f097ce7ec2d40ea2b4d0217d1e325f4769.tar.gz"],
+)

 # --end jsonnet--
 # --start atlassian--
@@ -227,3 +230,7 @@ new_local_repository(
 load("//bases/bazel:containers.bzl", "containers")

 containers()
+
+load("//rules_resf/toolchains:toolchains.bzl", "toolchains_repositories")
+
+toolchains_repositories()

View File

@@ -47,7 +47,7 @@ export default async function run(webpackConfig) {
   apis: {
     '/api': {
       prodApiUrl: endpointHttp(svcNameHttp('apollo'), NS('apollo')),
-      devApiUrl: `https://apollo-dev.internal.rdev.ciq.localhost`,
+      devApiUrl: `https://apollo-dev.internal.pdev.resf.localhost`,
     },
   },
   port: 9007,

View File

@@ -5,53 +5,27 @@ local ociRegistry = std.extVar('oci_registry');
 local utils = import 'ci/utils.jsonnet';
 local user = if domainUser != 'user-orig' then domainUser else origUser;

+local default_host_port = 'resf-peridot-dev.ctxqgglmfofx.us-east-2.rds.amazonaws.com:5432';
+local host_port = if !utils.helm_mode then default_host_port else '{{ .Values.postgresqlHostPort }}';
+
 {
-  host()::
-    'prod-db-cockroachdb-public.cockroachdb.svc.cluster.local',
-  port()::
-    '26257',
-  host_port()::
-    '%s:%s' % [$.host(), $.port()],
-  cert(name)::
-    '/cockroach-certs/client.%s.crt' % name,
-  key(name)::
-    '/cockroach-certs/client.%s.key' % name,
-  ca()::
-    '/cockroach-certs/ca.crt',
-  label()::
-    { 'cockroachdb-client': 'true' },
-  obj_label()::
-    { labels: $.label() },
   staged_name(name)::
-    '%s%s' % [name, std.strReplace(stage, '-', '')],
-  dsn_raw(name, password)::
-    local staged_name = $.staged_name(name);
-    'postgresql://%s%s@cockroachdb-public.cockroachdb.svc.cluster.local:26257' %
-    [staged_name, if password then ':byc' else ':REPLACEME'] +
-    '/%s?sslmode=require&sslcert=/cockroach-certs/client.%s.crt' %
-    [staged_name, staged_name] +
-    '&sslkey=/cockroach-certs/client.%s.key&sslrootcert=/cockroach-certs/ca.crt' %
-    [staged_name],
-  dsn_legacy(name, no_add=false, ns=null)::
+    '%s%s' % [name, if utils.helm_mode then '{{ template !"resf.stage!" . }}!!' else std.strReplace(stage, '-', '')],
+  dsn_inner(name, no_add=false, ns=null)::
     local staged_name = $.staged_name(name);
     if utils.local_image then 'postgresql://postgres:postgres@postgres-postgresql.default.svc.cluster.local:5432/%s?sslmode=disable' % staged_name
-    else 'postgresql://%s%s:REPLACEME@resf-peridot-dev.ctxqgglmfofx.us-east-2.rds.amazonaws.com:5432' %
-    [if !no_add then (if stage == '-dev' then '%s-dev' % user else if ns != null then ns else name)+'-' else '', if no_add then name else staged_name] +
+    else 'postgresql://%s%s:REPLACEME@%s' %
+    [if !no_add then (if utils.helm_mode then '!!{{ .Release.Namespace }}-' else (if stage == '-dev' then '%s-dev' % user else if ns != null then ns else name)+'-') else '', if no_add then name else staged_name, host_port] +
     '/%s?sslmode=disable' %
     [if no_add then name else staged_name],
-  dsn(name)::
-    $.dsn_raw(name, false),
+  dsn(name, no_add=false, ns=null)::
+    local res = $.dsn_inner(name, no_add, ns);
+    if utils.helm_mode then '!!{{ if .Values.databaseUrl }}{{ .Values.databaseUrl }}{{ else }}%s{{end}}!!' % res else res,
+  dsn_legacy(name, no_add=false, ns=null)::
+    $.dsn(name, no_add, ns),
 }

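For reference, a sketch of what db.dsn('peridot') evaluates to in helm mode, once the !" and !! escape markers have been stripped by the manifestYamlStream helper in ci/resfdeploy.jsonnet below (hypothetical output, shown as one line):

{{ if .Values.databaseUrl }}{{ .Values.databaseUrl }}{{ else }}postgresql://{{ .Release.Namespace }}-peridot{{ template "resf.stage" . }}:REPLACEME@{{ .Values.postgresqlHostPort }}/peridot{{ template "resf.stage" . }}?sslmode=disable{{end}}

REPLACEME is later substituted with DATABASE_PASSWORD by the migrate job's sed invocation.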
View File

@@ -8,12 +8,14 @@ local site = std.extVar('site');
 local arch = std.extVar('arch');
 local localEnvironment = std.extVar('local_environment') == '1';

-local stageNoDash = std.strReplace(stage, '-', '');
-local imagePullPolicy = if stageNoDash == 'dev' then 'Always' else 'IfNotPresent';
 local utils = import 'ci/utils.jsonnet';
+local helm_mode = utils.helm_mode;
+local stage = utils.stage;
+local user = utils.user;
+local stageNoDash = utils.stage_no_dash;
+local imagePullPolicy = if stageNoDash == 'dev' then 'Always' else 'IfNotPresent';

 local defaultEnvs = [
   {
     name: 'RESF_ENV',
@@ -24,6 +26,10 @@ local defaultEnvs = [
     valueFrom: true,
     field: 'metadata.namespace',
   },
+  {
+    name: 'RESF_FORCE_NS',
+    value: if helm_mode then '{{ .Values.catalogForceNs | default !"!" }}' else '',
+  },
   {
     name: 'RESF_SERVICE_ACCOUNT',
     valueFrom: true,
@@ -31,7 +37,7 @@ local defaultEnvs = [
   },
   {
     name: 'AWS_REGION',
-    value: 'us-east-2',
+    value: if helm_mode then '{{ .Values.awsRegion | default !"us-east-2!" }}' else 'us-east-2',
   },
   {
     name: 'LOCALSTACK_ENDPOINT',
@@ -40,19 +46,20 @@ local defaultEnvs = [
 ];

 local define_env(envsOrig) = std.filter(function(x) x != null, [
-  if field != null then {
+  if field != null then std.prune({
     name: field.name,
     value: if std.objectHas(field, 'value') then field.value,
     valueFrom: if std.objectHas(field, 'valueFrom') && field.valueFrom == true then {
       secretKeyRef: if std.objectHas(field, 'secret') then {
         name: field.secret.name,
         key: field.secret.key,
+        optional: if std.objectHas(field.secret, 'optional') then field.secret.optional else false,
       },
       fieldRef: if std.objectHas(field, 'field') then {
         fieldPath: field.field,
       },
     },
-  }
+  })
   for field in (envsOrig + defaultEnvs)
 ]);
@@ -65,7 +72,6 @@ local define_volumes(volumes) = [
     emptyDir: if std.objectHas(vm, 'emptyDir') then {},
     secret: if std.objectHas(vm, 'secret') then vm.secret,
     configMap: if std.objectHas(vm, 'configMap') then vm.configMap,
-    hostPath: if std.objectHas(vm, 'hostPath') then vm.hostPath,
   }
   for vm in volumes
 ];
@@ -99,7 +105,7 @@ local fix_metadata(metadata) = metadata {
   namespace: metadata.namespace,
 };

-local prod() = stage == '-prod';
+local prod() = stage != '-dev';
 local dev() = stage == '-dev';

 {
@@ -134,7 +140,7 @@ local dev() = stage == '-dev';
       env: if !std.objectHas(deporig, 'env') then [] else deporig.env,
       ports: if !std.objectHas(deporig, 'ports') then [{ containerPort: 80, protocol: 'TCP' }] else deporig.ports,
       initContainers: if !std.objectHas(deporig, 'initContainers') then [] else deporig.initContainers,
-      limits: if std.objectHas(deporig, 'limits') then deporig.limits,
+      limits: if !std.objectHas(deporig, 'limits') || deporig.limits == null then { cpu: '0.1', memory: '256M' } else deporig.limits,
       requests: if !std.objectHas(deporig, 'requests') || deporig.requests == null then { cpu: '0.001', memory: '128M' } else deporig.requests,
     };
@@ -184,7 +190,7 @@ local dev() = stage == '-dev';
       command: if std.objectHas(deployment, 'command') then deployment.command else null,
       args: if std.objectHas(deployment, 'args') then deployment.args else null,
       ports: deployment.ports,
-      env: define_env(deployment.env),
+      env: if std.objectHas(deployment, 'env') then define_env(deployment.env) else [],
       volumeMounts: if std.objectHas(deployment, 'volumes') && deployment.volumes != null then define_volume_mounts(deployment.volumes),
       securityContext: {
         runAsGroup: if std.objectHas(deployment, 'fsGroup') then deployment.fsGroup else null,
@@ -200,7 +206,7 @@ local dev() = stage == '-dev';
           port: deployment.health.port,
           httpHeaders: [
             {
-              name: 'byc-internal-req',
+              name: 'resf-internal-req',
               value: 'yes',
             },
           ],
@@ -212,7 +218,7 @@ local dev() = stage == '-dev';
         periodSeconds: if std.objectHas(deployment.health, 'periodSeconds') then deployment.health.periodSeconds else 3,
         timeoutSeconds: if std.objectHas(deployment.health, 'timeoutSeconds') then deployment.health.timeoutSeconds else 5,
         successThreshold: if std.objectHas(deployment.health, 'successThreshold') then deployment.health.successThreshold else 1,
-        failureThreshold: if std.objectHas(deployment.health, 'failureTreshold') then deployment.health.failureTreshold else 30,
+        failureThreshold: if std.objectHas(deployment.health, 'failureThreshold') then deployment.health.failureThreshold else 30,
       } else if std.objectHas(deployment, 'health_tcp') && deployment.health_tcp != null then {
         tcpSocket: {
           port: deployment.health_tcp.port,
@@ -263,7 +269,7 @@ local dev() = stage == '-dev';
         },
       },
       restartPolicy: 'Always',
-      imagePullSecrets: if std.objectHas(deployment, 'imagePullSecrets') && deployment.imagePullSecrets != null then [
+      imagePullSecrets: if std.objectHas(deployment, 'imagePullSecrets') && deployment.imagePullSecrets != null then if std.type(deployment.imagePullSecrets) == 'string' then deployment.imagePullSecrets else [
         {
           name: secret,
         }
@@ -305,7 +311,7 @@ local dev() = stage == '-dev';
           ],
         },
       }],
-    } + ({
+    } + (if !helm_mode then {} else {
       tls: [{
         hosts: [
           host,
@@ -415,7 +421,7 @@ local dev() = stage == '-dev';
     spec: {
       automountServiceAccountToken: true,
       serviceAccountName: if std.objectHas(job, 'serviceAccount') then job.serviceAccount,
-      imagePullSecrets: if std.objectHas(job, 'imagePullSecrets') && job.imagePullSecrets != null then [
+      imagePullSecrets: if std.objectHas(job, 'imagePullSecrets') && job.imagePullSecrets != null then if std.type(job.imagePullSecrets) == 'string' then job.imagePullSecrets else [
         {
           name: secret,
         }
@@ -430,7 +436,7 @@ local dev() = stage == '-dev';
         env: define_env(job.env),
         volumeMounts: if std.objectHas(job, 'volumes') && job.volumes != null then define_volume_mounts(job.volumes),
       }],
-      restartPolicy: 'Never',
+      restartPolicy: if std.objectHas(job, 'restartPolicy') then job.restartPolicy else 'Never',
       volumes: if std.objectHas(job, 'volumes') && job.volumes != null then define_volumes(job.volumes),
     },
   },
@@ -444,7 +450,7 @@ local dev() = stage == '-dev';
     apiVersion: 'v1',
     kind: 'ServiceAccount',
     metadata: metadata {
-      name: metadata.name + '-serviceaccount',
+      name: metadata.name,
     },
   },
@@ -475,6 +481,9 @@ local dev() = stage == '-dev';
     rules: rules,
   },

+  define_cluster_role_v2(metadataOrig, name, rules)::
+    $.define_cluster_role(metadataOrig { name: '%s-%s' % [metadataOrig.name, name] }, rules),
+
   // RoleBinding
   define_role_binding(metadataOrig, roleName, subjects)::
     local metadata = fix_metadata(metadataOrig);
@@ -514,6 +523,12 @@ local dev() = stage == '-dev';
     },
     subjects: subjects,
   },

+  bind_to_cluster_role_sa(role, serviceAccount)::
+    $.define_cluster_role_binding(role.metadata, role.metadata.name, [{
+      kind: 'ServiceAccount',
+      name: serviceAccount,
+      namespace: role.metadata.namespace,
+    }]),
+
   // PersistentVolumeClaim
   define_persistent_volume_claim(metadataOrig, storage, access_mode='ReadWriteOnce')::
@@ -546,69 +561,6 @@ local dev() = stage == '-dev';
     data: data,
   },

-  request_cdb_certs_volumes()::
-    [
-      {
-        name: 'client-certs',
-        path: '/cockroach-certs',
-        emptyDir: true,
-      },
-    ],
-  request_cdb_certs(user)::
-    {
-      name: 'init-certs',
-      image: ociRegistryDocker + '/cockroachdb/cockroach-k8s-request-cert',
-      tag: '0.4',
-      annotations: {
-        'sidecar.istio.io/inject': 'false',
-      },
-      command: [
-        '/bin/ash',
-      ],
-      args: [
-        '-ecx',
-        '/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=client -user=' + user + ' -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt && ' +
-        'chown -R 1000:1000 /cockroach-certs',
-      ],
-      volumes: $.request_cdb_certs_volumes(),
-      env: [{
-        name: 'POD_NAMESPACE',
-        valueFrom: true,
-        field: 'metadata.namespace',
-      }],
-    },
-  cdb_sa_roles(metadataOrig)::
-    local metadata = fix_metadata(metadataOrig);
-    {
-      apiVersion: 'v1',
-      kind: 'List',
-      items: [
-        $.define_service_account(metadataOrig),
-        $.define_role(metadataOrig, [{
-          apiGroups: [''],
-          resources: ['secrets'],
-          verbs: ['create', 'get'],
-        }]),
-        $.define_role_binding(metadataOrig, metadata.name + '-role', [{
-          kind: 'ServiceAccount',
-          name: metadata.name + '-serviceaccount',
-          namespace: metadata.namespace,
-        }]),
-        $.define_cluster_role(metadataOrig, [{
-          apiGroups: ['certificates.k8s.io'],
-          resources: ['certificatesigningrequests'],
-          verbs: ['create', 'get', 'watch'],
-        }]),
-        $.define_cluster_role_binding(metadataOrig, metadata.name + '-clusterrole', [{
-          kind: 'ServiceAccount',
-          name: metadata.name + '-serviceaccount',
-          namespace: metadata.namespace,
-        }]),
-      ],
-    },
-
   chown_vm(name, path, id, volumes)::
     {
       name: 'chown-vm-' + name,

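The two new RBAC helpers are meant to compose; a minimal sketch, with hypothetical metadata and rules:

local role = kubernetes.define_cluster_role_v2(metadata, 'watcher', [{
  apiGroups: [''],
  resources: ['pods'],
  verbs: ['get', 'watch'],
}]);

kubernetes.bind_to_cluster_role_sa(role, 'peridotserver')

bind_to_cluster_role_sa simply reuses the role manifest's own metadata for the ClusterRoleBinding, so the binding is created under the role's name and namespace.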
View File

@@ -1,58 +1,76 @@
-local stage = std.extVar('stage');
-local origUser = std.extVar('user');
-local domainUser = std.extVar('domain_user');
 local ociRegistry = std.extVar('oci_registry');
 local ociRegistryRepo = std.extVar('oci_registry_repo');
 local registry_secret = std.extVar('registry_secret');
-local user = if domainUser != 'user-orig' then domainUser else origUser;
-local stageNoDash = std.strReplace(stage, '-', '');
 local kubernetes = import 'ci/kubernetes.jsonnet';
 local db = import 'ci/db.jsonnet';
 local mappings = import 'ci/mappings.jsonnet';
 local utils = import 'ci/utils.jsonnet';
+local helm_mode = utils.helm_mode;
+local stage = utils.stage;
+local user = utils.user;
+local stageNoDash = utils.stage_no_dash;
+
+local slugify_ = function (x) std.asciiLower(std.substr(x, 0, 1)) + std.substr(x, 1, std.length(x)-1);
+local slugify = function (name, extra_remove, str) slugify_(std.join('', [std.asciiUpper(std.substr(x, 0, 1)) + std.asciiLower(std.substr(x, 1, std.length(x)-1)) for x in std.split(std.strReplace(std.strReplace(str, std.asciiUpper(name)+'_', ''), extra_remove, ''), '_')]));

+// Can be used to add common labels or annotations
 local labels = {
-  labels: db.label() + kubernetes.istio_labels(),
+  labels: kubernetes.istio_labels(),
 };

+// We're using a helper manifestYamlStream function to fix some general issues with it for Helm templates manually.
+// Currently Helm functions should use !" instead of " only for strings.
+// If a value doesn't start with a Helm bracket but ends with one, then end the value with !! (and the opposite for start).
+local manifestYamlStream = function (value, indent_array_in_object=false, c_document_end=false, quote_keys=false)
+  std.strReplace(std.strReplace(std.strReplace(std.strReplace(std.strReplace(std.manifestYamlStream(std.filter(function (x) x != null, value), indent_array_in_object, c_document_end, quote_keys), '!\\"', '"'), '"{{', '{{'), '}}"', '}}'), '}}!!', '}}'), '!!{{', '{{');
+
 {
+  user():: user,
   new(info)::
-    local metadata = {
+    local metadata_init = {
       name: info.name,
-      namespace: if stageNoDash == 'dev' then '%s-dev' % user else if std.objectHas(info, 'namespace') then info.namespace else info.name,
+      namespace: if helm_mode then '{{ .Release.Namespace }}' else (if stageNoDash == 'dev' then '%s-dev' % user else if std.objectHas(info, 'namespace') then info.namespace else info.name),
     };
+    local default_labels_all = {
+      'app.kubernetes.io/name': if helm_mode then '{{ template !"%s.name!" . }}' % info.name else info.name,
+    };
+    local default_labels_helm = if helm_mode then {
+      'helm.sh/chart': '{{ template !"%s.chart!" . }}' % info.name,
+      'app.kubernetes.io/managed-by': '{{ .Release.Service }}',
+      'app.kubernetes.io/instance': '{{ .Release.Name }}',
+      'app.kubernetes.io/version': info.tag,
+    } else {};
+    local default_labels = default_labels_all + default_labels_helm;
+    local metadata = metadata_init + { labels: default_labels };
     local fixed = kubernetes.fix_metadata(metadata);
     local vshost(srv) = '%s-service.%s.svc.cluster.local' % [srv.name, fixed.namespace];
     local infolabels = if info.backend then labels else { labels: kubernetes.istio_labels() };
     local dbname = (if std.objectHas(info, 'dbname') then info.dbname else info.name);
-    local env = if std.objectHas(info, 'env') then info.env else [];
-    local sa_name = '%s-%s-serviceaccount' % [stageNoDash, fixed.name];
-    local extra_info = {
-      service_account_name: sa_name,
-    };
+    local env = std.filter(function (x) x != null, [if x != null then if (!std.endsWith(x.name, 'DATABASE_URL') && std.objectHas(x, 'value') && x.value != null) && std.findSubstr('{{', x.value) == null then x {
+      value: if helm_mode then '{{ .Values.%s | default !"%s!"%s }}' % [slugify(info.name, if std.objectHas(info, 'helm_strip_prefix') then info.helm_strip_prefix else ' ', x.name), x.value, if x.value == 'true' || x.value == 'false' then ' | quote' else ''] else x.value,
+    } else x for x in (if std.objectHas(info, 'env') then info.env else [])]);
+    local sa_default = fixed.name;
+    local sa_name = if helm_mode then '{{ .Values.serviceAccountName | default !"%s!" }}' % [fixed.name] else sa_default;
     local envs = [stageNoDash];
-    local ports = info.ports + (if std.objectHas(info, 'disableMetrics') && info.disableMetrics then [] else [{
+    local disableMetrics = std.objectHas(info, 'disableMetrics') && info.disableMetrics;
+    local ports = (if std.objectHas(info, 'ports') then info.ports else []) + (if disableMetrics then [] else [{
       name: 'metrics',
       containerPort: 7332,
       protocol: 'TCP',
     }]);
     local services = if std.objectHas(info, 'services') then info.services else
       [{ name: '%s-%s-%s' % [metadata.name, port.name, env], port: port.containerPort, portName: port.name, expose: if std.objectHas(port, 'expose') then port.expose else false } for env in envs for port in ports];
-    local nssa = '001-ns-sa.yaml';
-    local migrate = '002-migrate.yaml';
-    local deployment = '003-deployment.yaml';
-    local svcVsDr = '004-svc-vs-dr.yaml';
-    local custom = '005-custom.yaml';
+    local file_yaml_prefix = if helm_mode then 'helm-' else '';
+    local nssa = '%s001-ns-sa.yaml' % file_yaml_prefix;
+    local migrate = '%s002-migrate.yaml' % file_yaml_prefix;
+    local deployment = '%s003-deployment.yaml' % file_yaml_prefix;
+    local svcVsDr = '%s004-svc-vs-dr.yaml' % file_yaml_prefix;
+    local custom = '%s005-custom.yaml' % file_yaml_prefix;
     local legacyDb = if std.objectHas(info, 'legacyDb') then info.legacyDb else false;

     local dbPassEnv = {
       name: 'DATABASE_PASSWORD',
@@ -61,52 +79,66 @@ local labels = {
       secret: if !utils.local_image then {
         name: '%s-database-password' % db.staged_name(dbname),
         key: 'password',
+        optional: '{{ if .Values.databaseUrl }}true{{ else }}false{{ end }}',
       },
     };
-    local shouldSecureEndpoint(srv) = if mappings.get_env_from_svc(srv.name) == 'prod' && mappings.is_external(srv.name) then false
+    local ingress_annotations = {
+      'kubernetes.io/tls-acme': 'true',
+      'cert-manager.io/cluster-issuer': if utils.helm_mode then '!!{{ if .Values.overrideClusterIssuer }}{{ .Values.overrideClusterIssuer }}{{ else }}letsencrypt-{{ template !"resf.stage!" . }}{{ end }}!!' else 'letsencrypt-staging',
+    } + (if utils.local_image || !info.backend then {
+      'konghq.com/https-redirect-status-code': '301',
+    } else {});
+    // Helm mode doesn't need this as the deployer/operator should configure it themselves
+    local shouldSecureEndpoint(srv) = if helm_mode then false else (if mappings.get_env_from_svc(srv.name) == 'prod' && mappings.is_external(srv.name) then false
       else if mappings.should_expose_all(srv.name) then false
       else if utils.local_image then false
       else if !std.objectHas(srv, 'expose') || !srv.expose then false
-      else true;
-    local imagePullSecrets = if registry_secret != 'none' then [registry_secret] else [];
+      else true);
+    local imagePullSecrets = if helm_mode then '{{ if .Values.imagePullSecrets }}[{{ range .Values.imagePullSecrets }}{ name: {{.}} },{{ end }}]{{ else }}null{{end}}' else (if registry_secret != 'none' then [registry_secret] else []);
+    local migrate_image = if std.objectHas(info, 'migrate_image') && info.migrate_image != null then info.migrate_image else info.image;
+    local migrate_tag = if std.objectHas(info, 'migrate_tag') && info.migrate_tag != null then info.migrate_tag else info.tag;
+    local stage_in_resource = if helm_mode then '%s!!' % stage else stage;
+    local image = if helm_mode then '{{ ((.Values.image).repository) | default !"%s!" }}' % info.image else info.image;
+    local tag = if helm_mode then '{{ ((.Values.image).tag) | default !"%s!" }}' % info.tag else info.tag;
+    local extra_info = {
+      service_account_name: sa_name,
+      imagePullSecrets: imagePullSecrets,
+      image: image,
+      tag: tag,
+    };

     {
-      [nssa]: std.manifestYamlStream([
-        kubernetes.define_namespace(metadata.namespace, infolabels),
-        kubernetes.define_service_account(metadata {
-          name: '%s-%s' % [stageNoDash, fixed.name],
-        } + if std.objectHas(info, 'service_account_options') then info.service_account_options else {},
+      [nssa]: (if helm_mode then '{{ if not .Values.serviceAccountName }}\n' else '') + manifestYamlStream([
+        // disable namespace creation in helm mode
+        if !helm_mode then kubernetes.define_namespace(metadata.namespace, infolabels),
+        kubernetes.define_service_account(
+          metadata {
+            name: fixed.name,
+          } + if std.objectHas(info, 'service_account_options') then info.service_account_options else {}
         ),
-      ]),
+      ]) + (if helm_mode then '{{ end }}' else ''),
       [if std.objectHas(info, 'migrate') && info.migrate == true then migrate else null]:
-        std.manifestYamlStream([
+        manifestYamlStream([
           kubernetes.define_service_account(metadata {
-            name: 'init-db-%s-%s' % [fixed.name, stageNoDash],
+            name: 'init-db-%s' % [fixed.name],
           }),
-          kubernetes.define_role_binding(metadata, metadata.name + '-role', [{
-            kind: 'ServiceAccount',
-            name: 'init-db-%s-%s-serviceaccount' % [fixed.name, stageNoDash],
-            namespace: metadata.namespace,
-          }]),
-          kubernetes.define_cluster_role_binding(metadata, metadata.name + '-clusterrole', [{
-            kind: 'ServiceAccount',
-            name: 'init-db-%s-%s-serviceaccount' % [fixed.name, stageNoDash],
-            namespace: metadata.namespace,
-          }]),
           kubernetes.define_role(
             metadata {
-              name: 'init-db-%s-%s' % [fixed.name, stageNoDash],
+              name: 'init-db-%s-%s' % [fixed.name, fixed.namespace],
+              namespace: 'initdb%s' % stage_in_resource,
             },
             [{
               apiGroups: [''],
               resources: ['secrets'],
-              verbs: ['create', 'get'],
+              verbs: ['get'],
             }]
           ),
-          kubernetes.define_cluster_role(
+          kubernetes.define_role(
            metadata {
-              name: 'init-db-%s-%s' % [fixed.name, stageNoDash],
+              name: 'init-db-%s' % [fixed.name],
            },
            [{
              apiGroups: [''],
@@ -116,133 +148,159 @@ local labels = {
           ),
           kubernetes.define_role_binding(
             metadata {
-              name: 'init-db-%s-%s' % [fixed.name, stageNoDash],
+              name: 'init-db-%s-%s' % [fixed.name, fixed.namespace],
+              namespace: 'initdb%s' % stage_in_resource,
             },
-            'init-db-%s-%s-role' % [fixed.name, stageNoDash],
+            'init-db-%s-%s-role' % [fixed.name, fixed.namespace],
             [{
               kind: 'ServiceAccount',
-              name: 'init-db-%s-%s-serviceaccount' % [fixed.name, stageNoDash],
+              name: 'init-db-%s' % [fixed.name],
               namespace: fixed.namespace,
             }],
           ),
-          kubernetes.define_cluster_role_binding(
+          kubernetes.define_role_binding(
             metadata {
-              name: 'init-db-%s-%s' % [fixed.name, stageNoDash],
+              name: 'init-db-%s' % [fixed.name],
             },
-            'init-db-%s-%s-clusterrole' % [fixed.name, stageNoDash],
+            'init-db-%s-role' % [fixed.name],
             [{
               kind: 'ServiceAccount',
-              name: 'init-db-%s-%s-serviceaccount' % [fixed.name, stageNoDash],
+              name: 'init-db-%s' % [fixed.name],
               namespace: fixed.namespace,
             }],
           ),
-          if !legacyDb then kubernetes.define_job(metadata { name: 'request-cert' }, kubernetes.request_cdb_certs('initdb%s' % stageNoDash) + {
-            serviceAccount: '%s-%s-serviceaccount' % [stageNoDash, fixed.name],
-          }),
-          if info.migrate == true && dbname != '' then kubernetes.define_job(metadata { name: info.name + '-migrate' }, {
-            image: if std.objectHas(info, 'migrate_image') && info.migrate_image != null then info.migrate_image else info.image,
-            tag: if std.objectHas(info, 'migrate_tag') && info.migrate_tag != null then info.migrate_tag else info.tag,
-            command: if std.objectHas(info, 'migrate_command') && info.migrate_command != null then info.migrate_command else ['/bin/sh'],
-            serviceAccount: 'init-db-%s-%s-serviceaccount' % [fixed.name, stageNoDash],
-            imagePullSecrets: imagePullSecrets,
-            args: if std.objectHas(info, 'migrate_args') && info.migrate_args != null then info.migrate_args else [
-              '-c',
-              'export REAL_DSN=`echo $%s | sed -e "s/REPLACEME/${DATABASE_PASSWORD}/g"%s`; /usr/bin/migrate -source file:///migrations -database $REAL_DSN up' % [info.dsn.name, if legacyDb then '' else ' | sed -e "s/postgresql/cockroachdb/g"'],
-            ],
-            volumes: (if std.objectHas(info, 'volumes') then info.volumes(metadata) else []) + (if !legacyDb then kubernetes.request_cdb_certs_volumes() else []),
-            initContainers: [
-              if !legacyDb then kubernetes.request_cdb_certs('%s%s' % [metadata.name, stageNoDash]) + {
-                serviceAccount: '%s-%s-serviceaccount' % [stageNoDash, fixed.name],
-              },
-              {
-                name: 'initdb',
-                image: 'quay.io/peridot/initdb:v0.1.4',
-                command: ['/bin/sh'],
-                args: ['-c', '/bundle/initdb*'],
-                volumes: if !legacyDb then kubernetes.request_cdb_certs_volumes(),
-                env: [
-                  {
-                    name: 'INITDB_TARGET_DB',
-                    value: db.staged_name(dbname),
-                  },
-                  {
-                    name: 'INITDB_PRODUCTION',
-                    value: 'true',
-                  },
-                  {
-                    name: 'INITDB_DATABASE_URL',
-                    value: if legacyDb then db.dsn_legacy('postgres', true) else db.dsn('initdb'),
-                  },
-                ],
-              },
-            ],
-            env: [
-              dbPassEnv,
-              info.dsn,
-            ],
-            annotations: {
-              'sidecar.istio.io/inject': 'false',
-              'linkerd.io/inject': 'disabled',
-            },
-          }) else {},
+          if info.migrate == true && dbname != '' then kubernetes.define_job(
+            metadata {
+              name: info.name + '-migrate',
+              annotations: (if helm_mode then {
+                'helm.sh/hook': 'post-install,post-upgrade',
+                'helm.sh/hook-weight': '-5',
+                'helm.sh/hook-delete-policy': 'before-hook-creation',
+              } else {}),
+            },
+            {
+              image: if helm_mode then '{{ if ((.Values.migrate_image).repository) }}{{ .Values.migrate_image.repository }}{{ else }}{{ ((.Values.image).repository) | default !"%s!" }}{{ end }}' % migrate_image else migrate_image,
+              tag: if helm_mode then '{{ if ((.Values.migrate_image).tag) }}{{ .Values.migrate_image.tag }}{{ else }}{{ ((.Values.image).tag) | default !"%s!" }}{{ end }}' % migrate_tag else migrate_tag,
+              command: if std.objectHas(info, 'migrate_command') && info.migrate_command != null then info.migrate_command else ['/bin/sh'],
+              serviceAccount: 'init-db-%s' % [fixed.name],
+              imagePullSecrets: imagePullSecrets,
+              args: if std.objectHas(info, 'migrate_args') && info.migrate_args != null then info.migrate_args else [
+                '-c',
+                'export REAL_DSN=`echo $%s | sed -e "s/REPLACEME/${DATABASE_PASSWORD}/g"`; /usr/bin/migrate -source file:///migrations -database $REAL_DSN up' % [info.dsn.name],
+              ],
+              volumes: (if std.objectHas(info, 'volumes') then info.volumes(metadata) else []),
+              initContainers: [
+                {
+                  name: 'initdb',
+                  image: 'quay.io/peridot/initdb:v0.1.5',
+                  command: ['/bin/sh'],
+                  args: ['-c', '/bundle/initdb*'],
+                  env: [
+                    {
+                      name: 'INITDB_TARGET_DB',
+                      value: db.staged_name(dbname),
+                    },
+                    {
+                      name: 'INITDB_PRODUCTION',
+                      value: 'true',
+                    },
+                    {
+                      name: 'INITDB_DATABASE_URL',
+                      value: db.dsn('postgres', true),
+                    },
+                    {
+                      name: 'INITDB_SKIP',
+                      value: if helm_mode then '!!{{ if .Values.databaseUrl }}true{{ else }}false{{ end }}!!' else 'false',
+                    },
+                  ],
+                },
+              ],
+              env: [
+                dbPassEnv,
+                info.dsn,
+              ],
+              annotations: {
+                'sidecar.istio.io/inject': 'false',
+                'linkerd.io/inject': 'disabled',
+              },
+            }
+          ) else {},
         ]),
-      [deployment]: std.manifestYamlStream([
+      [deployment]: manifestYamlStream([
         kubernetes.define_deployment(
-          metadata,
+          metadata {
+            annotations: if helm_mode then {
+              'resf.org/calculated-image': info.image,
+              'resf.org/calculated-tag': info.tag,
+            } else null
+          },
           {
-            replicas: if std.objectHas(info, 'replicas') then info.replicas else 1,
-            image: info.image,
-            tag: info.tag,
+            replicas: if helm_mode then '{{ .Values.replicas | default !"1!" }}' else (if std.objectHas(info, 'replicas') then info.replicas else 1),
+            image: image,
+            tag: tag,
             command: if std.objectHas(info, 'command') then [info.command] else null,
             fsGroup: if std.objectHas(info, 'fsGroup') then info.fsGroup else null,
             fsUser: if std.objectHas(info, 'fsUser') then info.fsUser else null,
             imagePullSecrets: imagePullSecrets,
-            labels: db.label(),
-            annotations: (if std.objectHas(info, 'annotations') then info.annotations else {}) + {
+            annotations: (if std.objectHas(info, 'annotations') then info.annotations else {}) + (if disableMetrics then {} else {
               'prometheus.io/scrape': 'true',
               'prometheus.io/port': '7332',
-            },
-            initContainers: if !legacyDb && info.backend then [kubernetes.request_cdb_certs('%s%s' % [metadata.name, stageNoDash]) + {
-              serviceAccount: '%s-%s-serviceaccount' % [stageNoDash, fixed.name],
-            }],
-            volumes: (if std.objectHas(info, 'volumes') then info.volumes(metadata) else []) + (if !legacyDb then kubernetes.request_cdb_certs_volumes() else []),
+            }),
+            volumes: (if std.objectHas(info, 'volumes') then info.volumes(metadata) else []),
             ports: std.map(function(x) x { expose: null, external: null }, ports),
             health: if std.objectHas(info, 'health') then info.health,
             env: env + (if dbname != '' && info.backend then ([dbPassEnv]) else []) + [
               {
                 name: 'SELF_IDENTITY',
-                value: 'spiffe://cluster.local/ns/%s/sa/%s-%s-serviceaccount' % [fixed.namespace, stageNoDash, fixed.name],
+                value: 'spiffe://cluster.local/ns/%s/sa/%s' % [fixed.namespace, fixed.name],
               },
             ] + [
-              if std.objectHas(srv, 'expose') && srv.expose then {
+              if std.objectHas(srv, 'expose') && srv.expose then (if helm_mode then {
+                name: '%s_PUBLIC_URL' % [std.asciiUpper(std.strReplace(std.strReplace(srv.name, stage, ''), '-', '_'))],
+                value: 'https://{{ .Values.%s.ingressHost }}!!' % [srv.portName],
+              } else {
                 name: '%s_PUBLIC_URL' % [std.asciiUpper(std.strReplace(std.strReplace(srv.name, stage, ''), '-', '_'))],
                 value: 'https://%s' % mappings.get(srv.name, user),
-              } else null,
+              }) else null,
             for srv in services],
             limits: if std.objectHas(info, 'limits') then info.limits,
             requests: if std.objectHas(info, 'requests') then info.requests,
             args: if std.objectHas(info, 'args') then info.args else [],
-            serviceAccount: '%s-%s-serviceaccount' % [stageNoDash, fixed.name],
+            serviceAccount: sa_name,
           },
         ),
       ]),
       [svcVsDr]:
-        std.manifestYamlStream(
+        manifestYamlStream(
           [kubernetes.define_service(
             metadata {
               name: srv.name,
               annotations: {
                 'konghq.com/protocol': std.strReplace(std.strReplace(std.strReplace(srv.name, metadata.name, ''), stage, ''), '-', ''),
+                'ingress.kubernetes.io/service-upstream': 'true',
               }
             },
             srv.port,
             srv.port,
             portName=srv.portName,
             selector=metadata.name,
-            env=mappings.get_env_from_svc(srv.name),
+            env=mappings.get_env_from_svc(srv.name)
           ) for srv in services] +
-          [kubernetes.define_virtual_service(metadata { name: srv.name + '-internal' }, {
+          if !helm_mode then [] else [if std.objectHas(srv, 'expose') && srv.expose then kubernetes.define_ingress(
+            metadata {
+              name: srv.name,
+              annotations: ingress_annotations + {
+                'kubernetes.io/ingress.class': '{{ .Values.ingressClass | default !"!" }}',
+                // Secure only by default
+                // This produces https, grpcs, etc.
+                // todo(mustafa): check if we need to add an exemption to a protocol (TCP comes to mind)
+                'konghq.com/protocols': '{{ .Values.kongProtocols | default !"%ss!"' % std.strReplace(std.strReplace(std.strReplace(srv.name, metadata.name, ''), stage, ''), '-', ''),
              }
            },
            host=if helm_mode then '{{ .Values.%s.ingressHost }}' % srv.portName else mappings.get(srv.name, user),
            port=srv.port,
            srvName=srv.name + '-service',
          ) else null for srv in services] +
+          if helm_mode then [] else [kubernetes.define_virtual_service(metadata { name: srv.name + '-internal' }, {
             hosts: [vshost(srv)],
             gateways: [],
             http: [
@@ -259,7 +317,7 @@ local labels = {
               },
             ],
           },) for srv in services] +
-          [if std.objectHas(srv, 'expose') && srv.expose then kubernetes.define_virtual_service(
+          if helm_mode then [] else [if std.objectHas(srv, 'expose') && srv.expose then kubernetes.define_virtual_service(
             metadata {
               name: srv.name,
               annotations: {
@@ -284,7 +342,7 @@ local labels = {
               ],
             }
           ) else null for srv in services] +
-          [{
+          if helm_mode then [] else [{
             apiVersion: 'security.istio.io/v1beta1',
             kind: 'RequestAuthentication',
             metadata: metadata {
@@ -305,7 +363,7 @@ local labels = {
               }] else [],
             },
           } for srv in services] +
-          [{
+          if helm_mode then [] else [{
             apiVersion: 'security.istio.io/v1beta1',
             kind: 'AuthorizationPolicy',
             metadata: metadata {
@@ -330,7 +388,7 @@ local labels = {
             }],
             },
           } for srv in services] +
-          [kubernetes.define_destination_rule(metadata { name: srv.name }, {
+          if helm_mode then [] else [kubernetes.define_destination_rule(metadata { name: srv.name }, {
             host: vshost(srv),
             trafficPolicy: {
               tls: {
@@ -349,6 +407,6 @@ local labels = {
           },) for srv in services]
         ),
       [if std.objectHas(info, 'custom_job_items') then custom else null]:
-        std.manifestYamlStream(if std.objectHas(info, 'custom_job_items') then info.custom_job_items(metadata, extra_info) else [{}]),
+        manifestYamlStream(if std.objectHas(info, 'custom_job_items') then info.custom_job_items(metadata, extra_info) else [{}]),
   },
 }

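The escape convention handled by the manifestYamlStream helper is easiest to see on a single value; taking the RESF_FORCE_NS default from ci/kubernetes.jsonnet above as an example:

jsonnet value:                '{{ .Values.catalogForceNs | default !"!" }}'
after std.manifestYamlStream: value: "{{ .Values.catalogForceNs | default !\"!\" }}"
after the replacements:       value: {{ .Values.catalogForceNs | default "" }}

That is, !" marks quotes that must survive into the Helm template, while the "{{ and }}" replacements unquote values that are pure template expressions.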
ci/s3.jsonnet Normal file
View File

@@ -0,0 +1,30 @@
local utils = import 'ci/utils.jsonnet';

{
  kube_env(prefix): [
    {
      name: '%s_S3_ENDPOINT' % prefix,
      value: if utils.helm_mode then '{{ .Values.s3Endpoint | default !"!" }}' else if utils.local_image then 'minio.default.svc.cluster.local:9000' else '',
    },
    {
      name: '%s_S3_DISABLE_SSL' % prefix,
      value: if utils.helm_mode then '{{ .Values.s3DisableSsl | default !"false!" | quote }}' else if utils.local_image then 'true' else 'false',
    },
    {
      name: '%s_S3_FORCE_PATH_STYLE' % prefix,
      value: if utils.helm_mode then '{{ .Values.s3ForcePathStyle | default !"false!" | quote }}' else if utils.local_image then 'true' else 'false',
    },
    {
      name: '%s_S3_REGION' % prefix,
      value: if utils.helm_mode then '{{ .Values.awsRegion | default !"us-east-2!" }}' else 'us-east-2',
    },
    {
      name: '%s_S3_BUCKET' % prefix,
      value: if utils.helm_mode then '{{ .Values.s3Bucket | default !"!" }}' else '',
    },
    {
      name: '%s_S3_ASSUME_ROLE' % prefix,
      value: if utils.helm_mode then '{{ .Values.s3AssumeRole | default !"!" }}' else '',
    },
  ],
}

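As an example, the first entry of s3.kube_env('PERIDOT') renders in helm mode as (sketch, after the escape markers are stripped):

- name: PERIDOT_S3_ENDPOINT
  value: {{ .Values.s3Endpoint | default "" }}

while a local (non-helm) build gets the literal minio.default.svc.cluster.local:9000 and other non-helm modes an empty string.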
View File

@@ -2,10 +2,20 @@ local stage = std.extVar('stage');
 local ociRegistry = std.extVar('oci_registry');
 local ociRegistryDocker = std.extVar('oci_registry_docker');
 local localEnvironment = std.extVar('local_environment');
+local origUser = std.extVar('user');
+local domainUser = std.extVar('domain_user');

 local localImage = if localEnvironment == "1" then true else false;

+local helm_mode = std.extVar('helm_mode') == 'true';
+local stage = if helm_mode then '-{{ template !"resf.stage!" . }}' else std.extVar('stage');
+local user = if domainUser != 'user-orig' then domainUser else origUser;
+local stage_no_dash = std.strReplace(stage, '-', '');
+
 {
-  local_image: localImage,
+  local_image: if helm_mode then false else localImage,
   docker_hub_image(name): "%s/%s" % [ociRegistryDocker, name],
-  helm_mode: false,
+  helm_mode: helm_mode,
+  stage: stage,
+  user: user,
+  stage_no_dash: stage_no_dash,
 }

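In helm mode the exported fields become template fragments rather than literals; the object evaluates to roughly (sketch, user field omitted):

{
  local_image: false,
  helm_mode: true,
  stage: '-{{ template !"resf.stage!" . }}',
  stage_no_dash: '{{ template !"resf.stage!" . }}',
}

stage_no_dash only strips literal dashes, and the template text itself contains none, so the fragment passes through intact.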
View File

@@ -62,8 +62,8 @@ export default async function (opts) {
   const app = express();

   app.use(function (req, res, next) {
-    // Including byc-internal-req: 1 should return the Z page
-    if (req.header('byc-internal-req') === 'yes') {
+    // Including resf-internal-req: 1 should return the Z page
+    if (req.header('resf-internal-req') === 'yes') {
       appZ(req, res, next);
     } else {
       next();

View File

@@ -15,5 +15,7 @@ peridot_k8s(
     name = "peridotserver",
     src = "deploy.jsonnet",
     outs = RESFDEPLOY_OUTS_MIGRATE,
+    chart_yaml = "Chart.yaml",
+    values_yaml = "values.yaml",
     deps = ["//ci"],
 )

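Passing chart_yaml and values_yaml makes peridot_k8s (see rules_resf/defs.bzl below) declare an extra, manually tagged <name>.helm target next to the normal manifests, so the chart tarball has to be built explicitly; a hypothetical invocation (package path assumed for illustration):

bazel build //peridot/cmd/v1/peridotserver:peridotserver.helm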
View File

@@ -0,0 +1,6 @@
apiVersion: v2
name: peridotserver
description: Helm chart for peridotserver
type: application
version: 0.0.1
appVersion: "0.0.1"

View File

@@ -3,9 +3,11 @@ local db = import 'ci/db.jsonnet';
 local kubernetes = import 'ci/kubernetes.jsonnet';
 local temporal = import 'ci/temporal.jsonnet';
 local utils = import 'ci/utils.jsonnet';
+local s3 = import 'ci/s3.jsonnet';

 resfdeploy.new({
   name: 'peridotserver',
+  helm_strip_prefix: 'PERIDOT_',
   replicas: if kubernetes.prod() then 5 else 1,
   dbname: 'peridot',
   backend: true,
@@ -31,7 +33,7 @@ resfdeploy.new({
   },
   service_account_options: {
     annotations: {
-      'eks.amazonaws.com/role-arn': 'arn:aws:iam::893168113496:role/peridot_k8s_role',
+      'eks.amazonaws.com/role-arn': if utils.helm_mode then '{{ .Values.awsRoleArn | default !"!" }}' else 'arn:aws:iam::893168113496:role/peridot_k8s_role',
     }
   },
   ports: [
@@ -55,26 +57,18 @@ resfdeploy.new({
       name: 'PERIDOT_PRODUCTION',
       value: if kubernetes.dev() then 'false' else 'true',
     },
-    if utils.local_image then {
-      name: 'PERIDOT_S3_ENDPOINT',
-      value: 'minio.default.svc.cluster.local:9000'
+    {
+      name: 'HYDRA_PUBLIC_HTTP_ENDPOINT_OVERRIDE',
+      value: if utils.helm_mode then '{{ .Values.hydraPublicEndpoint | default !"!" }}' else '',
     },
-    if utils.local_image then {
-      name: 'PERIDOT_S3_DISABLE_SSL',
-      value: 'true'
+    {
+      name: 'HYDRA_ADMIN_HTTP_ENDPOINT_OVERRIDE',
+      value: if utils.helm_mode then '{{ .Values.hydraAdminEndpoint | default !"!" }}' else '',
     },
-    if utils.local_image then {
-      name: 'PERIDOT_S3_FORCE_PATH_STYLE',
-      value: 'true'
-    },
-    if kubernetes.prod() then {
-      name: 'PERIDOT_S3_REGION',
-      value: 'us-east-2',
-    },
-    if kubernetes.prod() then {
-      name: 'PERIDOT_S3_BUCKET',
-      value: 'resf-peridot-prod',
+    {
+      name: 'SPICEDB_GRPC_ENDPOINT_OVERRIDE',
+      value: if utils.helm_mode then '{{ .Values.spicedbEndpoint | default !"!" }}' else '',
     },
     $.dsn,
-  ] + temporal.kube_env('PERIDOT'),
+  ] + s3.kube_env('PERIDOT') + temporal.kube_env('PERIDOT'),
 })

View File

@@ -0,0 +1,5 @@
# Ports under here require ingressHost to be set during deploy
http:
  ingressHost: null

temporalHostPort: workflow-temporal-frontend.workflow.svc.cluster.local:7233

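Combined with the defaults the packaging script appends (stage, image, replicas; see helm.bash below), a deploy could then look like this, with hypothetical release name, ingress host and database endpoint:

helm install peridotserver ./peridotserver-0.0.1.tgz \
  --set stage=dev \
  --set http.ingressHost=peridot.example.com \
  --set postgresqlHostPort=postgres.example.com:5432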
View File

@@ -26,7 +26,7 @@ config_setting(
     ],
 )

-platform(
+config_setting(
     name = "linux_x86_64",
     constraint_values = [
         "@platforms//os:linux",
@@ -34,7 +34,23 @@ platform(
     ],
 )

-platform(
+config_setting(
+    name = "linux_arm64",
+    constraint_values = [
+        "@platforms//os:linux",
+        "@platforms//cpu:arm64",
+    ],
+)
+
+config_setting(
+    name = "darwin_x86_64",
+    constraint_values = [
+        "@platforms//os:macos",
+        "@platforms//cpu:x86_64",
+    ],
+)
+
+config_setting(
     name = "darwin_arm64",
     constraint_values = [
         "@platforms//os:macos",

View File

@@ -1,6 +1,7 @@
 load("//rules_resf/internal/resf_bundle:resf_bundle.bzl", _resf_bundle = "resf_bundle", _resf_bundle_run = "resf_bundle_run")
 load("//rules_resf/internal/k8s:k8s.bzl", _k8s_apply = "k8s_apply")
 load("//rules_resf/internal/container:container.bzl", _container = "container", _migration_tar = "migration_tar")
+load("//rules_resf/internal/helm:helm_chart.bzl", _helm_chart = "helm_chart")
 load("@io_bazel_rules_jsonnet//jsonnet:jsonnet.bzl", "jsonnet_to_json")
 load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_binary")
 load("@com_github_atlassian_bazel_tools//:multirun/def.bzl", "multirun")
@@ -9,6 +10,7 @@ resf_bundle = _resf_bundle
 k8s_apply = _k8s_apply
 container = _container
 migration_tar = _migration_tar
+helm_chart = _helm_chart

 RESFDEPLOY_OUTS_BASE = [
     "001-ns-sa.yaml",
@@ -34,8 +36,7 @@ def tag_default_update(defaults, append):
     tdict.update(append)
     return tdict

-# to find the correct kind during ci run
-def peridot_k8s(name, src, tags = [], outs = [], static = False, prod_only = False, dependent_push = [], force_normal_tags = False, **kwargs):
+def gen_from_jsonnet(name, src, outs, tags, force_normal_tags, helm_mode, **kwargs):
     ext_str_nested = "{STABLE_OCI_REGISTRY_NO_NESTED_SUPPORT_IN_2022_SHAME_ON_YOU_AWS}"
     if force_normal_tags:
         ext_str_nested = "false"
@@ -51,7 +52,10 @@
         "domain_user": "{STABLE_DOMAIN_USER}",
         "registry_secret": "{STABLE_REGISTRY_SECRET}",
         "site": "{STABLE_SITE}",
+        "helm_mode": "false",
     }
+    if helm_mode:
+        ext_strs["helm_mode"] = "true"
     jsonnet_to_json(
         name = name,
         src = src,
@@ -84,6 +88,24 @@
         **kwargs
     )

+# to find the correct kind during ci run
+def peridot_k8s(name, src, tags = [], outs = [], static = False, prod_only = False, dependent_push = [], force_normal_tags = False, chart_yaml = None, values_yaml = None, **kwargs):
+    gen_from_jsonnet(name, src, outs, tags, force_normal_tags, False, **kwargs)
+    if chart_yaml != None:
+        if values_yaml == None:
+            fail("values_yaml is required when chart_yaml is provided")
+        new_outs = ["helm-%s" % o for o in outs]
+        gen_from_jsonnet("%s-helm" % name, src, new_outs, tags, force_normal_tags, True, **kwargs)
+        helm_chart(
+            name = "%s.helm" % name,
+            package_name = name,
+            chart_yaml = chart_yaml,
+            values_yaml = values_yaml,
+            srcs = new_outs,
+            tags = ["manual"]
+        )
+
     k8s_apply(
         name = "%s.apply" % name,
         srcs = [":%s" % name],

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,27 @@
package(default_visibility = ["//visibility:public"])

exports_files([
    ".helmignore",
    "_helpers.tpl",
    "helm.bash",
])

alias(
    name = "helm_tool",
    actual = select({
        "//platforms:linux_x86_64": "@helm3_linux_x86_64//:helm",
        "//platforms:linux_arm64": "@helm3_linux_arm64//:helm",
        "//platforms:darwin_x86_64": "@helm3_darwin_x86_64//:helm",
        "//platforms:darwin_arm64": "@helm3_darwin_arm64//:helm",
    }),
)

alias(
    name = "yq_tool",
    actual = select({
        "//platforms:linux_x86_64": "@yq_linux_x86_64//:yq",
        "//platforms:linux_arm64": "@yq_linux_arm64//:yq",
        "//platforms:darwin_x86_64": "@yq_darwin_x86_64//:yq",
        "//platforms:darwin_arm64": "@yq_darwin_arm64//:yq",
    }),
)

View File

@@ -0,0 +1,51 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "{NAME}.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "{NAME}.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "{NAME}.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Stage/environment the chart is being deployed to.
*/}}
{{- define "resf.stage" -}}
{{- .Values.stage | default "prod" }}
{{- end }}
{{/*
Long name of stage (prod -> production for example)
*/}}
{{- define "resf.longStage" -}}
{{- if eq .Values.stage "prod" }}
{{- "production" }}
{{- else if eq .Values.stage "dev" }}
{{- "development" }}
{{- else }}
{{- .Values.stage }}
{{- end }}
{{- end }}

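helm.bash below substitutes {NAME} with the chart name read from Chart.yaml, so for the peridotserver chart the first define ends up as, for example:

{{- define "peridotserver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}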
View File

@@ -0,0 +1,84 @@
#!/usr/bin/env bash
set -e
shopt -s extglob

HELM_BIN="$(pwd)/TMPL_helm_bin"
YQ_BIN="$(pwd)/TMPL_yq_bin"
NAME="TMPL_name"
staging_dir="TMPL_staging_dir"
image_name="TMPL_image_name"
tarball_file_path="$(pwd)/TMPL_tarball_file_path"
IFS=';' read -ra stamp_files <<< "TMPL_stamp_files"

# Find STABLE_BUILD_TAG, STABLE_OCI_REGISTRY, STABLE_OCI_REGISTRY_REPO, STABLE_OCI_REGISTRY_NO_NESTED_SUPPORT_IN_2022_SHAME_ON_YOU_AWS and make it available to the script.
vars=("STABLE_BUILD_TAG" "STABLE_OCI_REGISTRY" "STABLE_OCI_REGISTRY_REPO" "STABLE_OCI_REGISTRY_NO_NESTED_SUPPORT_IN_2022_SHAME_ON_YOU_AWS")
for stamp in "${stamp_files[@]}"; do
  for var in "${vars[@]}"; do
    if grep -q "${var} " "${stamp}"; then
      export "${var}"="$(grep "${var} " "${stamp}" | cut -d ' ' -f 2 | tr -d '\n')"
    fi
  done
done

helm_repo="${STABLE_OCI_REGISTRY}/${STABLE_OCI_REGISTRY_REPO}/$image_name"
helm_tag="${STABLE_BUILD_TAG}"
if [[ "${STABLE_OCI_REGISTRY_NO_NESTED_SUPPORT_IN_2022_SHAME_ON_YOU_AWS}" == "true" ]]; then
  helm_repo="${STABLE_OCI_REGISTRY}/${STABLE_OCI_REGISTRY_REPO}"
  helm_tag="${image_name}-${STABLE_BUILD_TAG}"
fi

# Change to the staging directory
cd $staging_dir || exit 1

# This codebase will probably use resfdeploy so let's just rename the manifest
# files to something that makes more sense for Helm
move_deployment() {
  mv "$1" "deployment.yaml"
  helm_repo="$(grep "calculated-image:" deployment.yaml | cut -d '"' -f 2)"
  helm_tag="$(grep "calculated-tag:" deployment.yaml | cut -d '"' -f 2)"
}
f="helm-001-ns-sa.yaml"; test -f "$f" && mv "$f" "serviceaccount.yaml"
f="helm-002-migrate.yaml"; test -f "$f" && mv "$f" "migrate.yaml"
f="helm-003-deployment.yaml"; test -f "$f" && move_deployment "$f"
f="helm-004-svc-vs-dr.yaml"; test -f "$f" && mv "$f" "service-ingress.yaml"

# Move yaml files that aren't Chart.yaml or values.yaml to the templates directory
mkdir -p templates
mv !(Chart.yaml|values.yaml|templates|.helmignore) templates

# Envsubst _helpers.tpl to fill in $NAME
CHART_NAME="$($YQ_BIN '.name' Chart.yaml)"
sed "s/{NAME}/$CHART_NAME/" templates/_helpers.tpl > templates/_helpers.tpl.new
rm -f templates/_helpers.tpl
mv templates/_helpers.tpl.new templates/_helpers.tpl

# Since the stage variable is required, make it "known" in values.yaml
chmod 777 values.yaml
echo "# The stage variable should be set to correct environment during deployment" >> values.yaml
echo "stage: prod" >> values.yaml

# The database connection variables are standardized, add here and make it known
# Only add the database variables for non-frontend charts
# todo(mustafa): add a better way to determine this
# tip: deploy.jsonnet already "knows" if a service requires a database or not
if [[ "$CHART_NAME" != *-frontend ]]; then
  echo "# For database connection" >> values.yaml
  echo "# Set postgresqlHostPort if you use initdb" >> values.yaml
  echo "postgresqlHostPort: null" >> values.yaml
  echo "# Set databaseUrl if you don't use initdb" >> values.yaml
  echo "databaseUrl: null" >> values.yaml
fi

# Service account name can also be customized
echo "# The service account name can be customized" >> values.yaml
echo "serviceAccountName: null" >> values.yaml

# Set default image values
${YQ_BIN} -i '.image.repository = '"\"$helm_repo\"" values.yaml
${YQ_BIN} -i '.image.tag = '"\"$helm_tag\"" values.yaml
${YQ_BIN} -i '.replicas = 1' values.yaml

# Helm package the chart
${HELM_BIN} package . > /dev/null 2>&1
mv ./*.tgz "$tarball_file_path"

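For a backend chart the script leaves values.yaml ending roughly like this (sketch; repository and tag are sourced from the deployment annotations when resfdeploy manifests are packaged, and the registry shown here is hypothetical):

# The stage variable should be set to correct environment during deployment
stage: prod
# For database connection
# Set postgresqlHostPort if you use initdb
postgresqlHostPort: null
# Set databaseUrl if you don't use initdb
databaseUrl: null
# The service account name can be customized
serviceAccountName: null
image:
  repository: registry.example.com/resf/peridotserver
  tag: v0.0.1-beta1
replicas: 1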
View File

@@ -0,0 +1,151 @@
def _helm_chart_impl(ctx):
    """
    :param ctx

    Package k8s manifests into a Helm chart with specified Chart.yaml
    Sets default registry and tag to stable values defined in tools.sh / .envrc

    The following variables will be set in _helpers.tpl:
      * {name}.name
      * {name}.fullname
      * {name}.chart

    The following values.yaml variables will be available:
      * awsRegion (optional, default is us-east-2)
      * stage (required, example: dev)
      * image.repository (optional, default is STABLE_OCI_REGISTRY/STABLE_OCI_REGISTRY_REPO/image_name)
      * image.tag (optional, default is STABLE_BUILD_TAG)
      * {portName}.ingressHost (required if any ports/services are marked as exposed, port name can for example be "http" or "grpc")
      * postgresHostPort (required if service is backend or databaseUrl is not set)
      * databaseUrl (optional, required if postgresHostPort is not set)

    internal:
    The general structure of a Helm chart should be as follows:
      * Chart.yaml
      * values.yaml
      * .helmignore
      * templates/
        * _helpers.tpl
        * ctx.files.srcs

    If resfdeploy templates are used, the following values.yaml variables changes defaults:
      * image.repository (sourced from deployment.yaml)
      * image.tag (sourced from deployment.yaml)
    """

    # The stamp files should be brought in to be able to apply default registry and tag
    stamp_files = [ctx.info_file, ctx.version_file]

    # Create new staging directory
    staging_dir = "chart"
    tmp_dirname = ""

    # Declare inputs to the final Helm script
    inputs = []

    # Fail if srcs contains a file called Chart.yaml, values.yaml, _helpers.tpl or .helmignore
    for src in ctx.files.srcs:
        if src.basename in ["Chart.yaml", "values.yaml", "_helpers.tpl", ".helmignore"]:
            fail("{} is a reserved file name and should not exist in srcs".format(src.basename))

    # Copy srcs into staging directory
    for src in ctx.files.srcs + ctx.files.chart_yaml + ctx.files.values_yaml + ctx.files._helpers_tpl + ctx.files._helmignore:
        cp_out = ctx.actions.declare_file(staging_dir + "/" + src.basename)
        if tmp_dirname == "":
            tmp_dirname = cp_out.dirname
        inputs.append(cp_out)
        ctx.actions.run_shell(
            outputs = [cp_out],
            inputs = [src],
            mnemonic = "HelmCopyToStaging",
            arguments = [src.path, cp_out.path],
            command = "cp -RL $1 $2",
        )

    # Expand template for Helm script
    tarball_file = ctx.actions.declare_file(ctx.label.name + ".tgz")
    out_file = ctx.actions.declare_file(ctx.label.name + ".helm.bash")
    ctx.actions.expand_template(
        template = ctx.file._helm_script,
        output = out_file,
        substitutions = {
            "TMPL_helm_bin": ctx.file._helm_bin.path,
            "TMPL_yq_bin": ctx.file._yq_bin.path,
            "TMPL_name": ctx.attr.package_name,
            "TMPL_staging_dir": tmp_dirname,
            "TMPL_image_name": ctx.attr.package_name if not ctx.attr.image_name else ctx.attr.image_name,
            "TMPL_tarball_file_path": tarball_file.path,
            "TMPL_stamp_files": ";".join([x.path for x in stamp_files]),
        },
        is_executable = True,
    )

    # Run Helm script and generate a tarball
    ctx.actions.run(
        outputs = [tarball_file],
        inputs = inputs + stamp_files + [ctx.file._helm_bin, ctx.file._yq_bin],
        executable = out_file,
        mnemonic = "HelmChart",
    )

    return [DefaultInfo(
        files = depset([tarball_file]),
    )]

helm_chart = rule(
    implementation = _helm_chart_impl,
    attrs = {
        "package_name": attr.string(
            doc = "The name of the package",
            mandatory = True,
        ),
        "image_name": attr.string(
            doc = "The name of the OCI image, defaults to package_name. Ignored if resfdeploy is used and sourced from deployment.yaml",
        ),
        "chart_yaml": attr.label(
            doc = "Chart.yaml file path",
            default = ":Chart.yaml",
            mandatory = True,
            allow_single_file = True,
        ),
        "values_yaml": attr.label(
            doc = "values.yaml file path",
            default = ":values.yaml",
            mandatory = True,
            allow_single_file = True,
        ),
        "srcs": attr.label_list(
            doc = "List of templates/manifests to be included in chart",
            mandatory = True,
            allow_files = True,
        ),
        "_helm_bin": attr.label(
            doc = "Helm binary path",
            default = "//rules_resf/internal/helm:helm_tool",
            allow_single_file = True,
            cfg = "host",
        ),
        "_yq_bin": attr.label(
            doc = "yq binary path",
            default = "//rules_resf/internal/helm:yq_tool",
            allow_single_file = True,
            cfg = "host",
        ),
        "_helpers_tpl": attr.label(
            doc = "Helpers template path",
            default = "//rules_resf/internal/helm:_helpers.tpl",
            allow_single_file = True,
        ),
        "_helm_script": attr.label(
            doc = "Helm script path",
            default = "//rules_resf/internal/helm:helm.bash",
            allow_single_file = True,
        ),
        "_helmignore": attr.label(
            doc = "Helmignore file path",
            default = "//rules_resf/internal/helm:.helmignore",
            allow_single_file = True,
        ),
    },
)

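The rule can also be used on its own, outside peridot_k8s; a hypothetical BUILD snippet (srcs must avoid the reserved file names Chart.yaml, values.yaml, _helpers.tpl and .helmignore):

load("//rules_resf:defs.bzl", "helm_chart")

helm_chart(
    name = "mychart.helm",
    package_name = "mychart",
    chart_yaml = "Chart.yaml",
    values_yaml = "values.yaml",
    srcs = ["deployment.yaml", "service.yaml"],
)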
View File

View File

View File

@@ -0,0 +1,40 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

build_file_content = """
exports_files(["helm"])
"""

patch_cmds = ["mv */helm helm"]

def helm3_repositories():
    http_archive(
        name = "helm3_linux_x86_64",
        sha256 = "1484ffb0c7a608d8069470f48b88d729e88c41a1b6602f145231e8ea7b43b50a",
        urls = ["https://get.helm.sh/helm-v3.9.0-linux-amd64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "helm3_linux_arm64",
        sha256 = "5c0aa709c5aaeedd190907d70f9012052c1eea7dff94bffe941b879a33873947",
        urls = ["https://get.helm.sh/helm-v3.9.0-linux-arm64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "helm3_darwin_x86_64",
        sha256 = "7e5a2f2a6696acf278ea17401ade5c35430e2caa57f67d4aa99c607edcc08f5e",
        urls = ["https://get.helm.sh/helm-v3.9.0-darwin-amd64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "helm3_darwin_arm64",
        sha256 = "22cf080ded5dd71ec15d33c13586ace9b6002e97518a76df628e67ecedd5aa70",
        urls = ["https://get.helm.sh/helm-v3.9.0-darwin-arm64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

View File

@@ -0,0 +1,6 @@
load("//rules_resf/toolchains/helm-3:repositories.bzl", "helm3_repositories")
load("//rules_resf/toolchains/yq:repositories.bzl", "yq_repositories")

def toolchains_repositories():
    helm3_repositories()
    yq_repositories()

View File

View File

@@ -0,0 +1,40 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

build_file_content = """
exports_files(["yq"])
"""

patch_cmds = ["mv yq_* yq"]

def yq_repositories():
    http_archive(
        name = "yq_linux_x86_64",
        sha256 = "29716620085fdc7e3d2d12a749124a5113091183306a274f8abc61009ca38996",
        urls = ["https://github.com/mikefarah/yq/releases/download/v4.25.2/yq_linux_amd64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "yq_linux_arm64",
        sha256 = "77d84462f65c4f4d9a972158887dcd35c029cf199ee9c42b573a6e6e6ecd372f",
        urls = ["https://github.com/mikefarah/yq/releases/download/v4.25.2/yq_linux_arm64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "yq_darwin_x86_64",
        sha256 = "b7a836729142a6f54952e9a7675ae183acb7fbacc36ff555ef763939a26731a6",
        urls = ["https://github.com/mikefarah/yq/releases/download/v4.25.2/yq_darwin_amd64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )

    http_archive(
        name = "yq_darwin_arm64",
        sha256 = "e2e8fe89ee4d4e7257838e5941c50ef5aa753a86c699ade8a099cd46f09da1d3",
        urls = ["https://github.com/mikefarah/yq/releases/download/v4.25.2/yq_darwin_arm64.tar.gz"],
        patch_cmds = patch_cmds,
        build_file_content = build_file_content,
    )