This commit is contained in:
Neil Hanlon 2024-03-28 21:31:14 +00:00 committed by GitHub
commit 6a06357dbe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
39 changed files with 691 additions and 726 deletions

View File

@ -24,6 +24,16 @@ load("//wrksp:python_deps.bzl", "python_deps")
python_deps()
# --end python--
http_archive(
name = "rules_pkg",
urls = [
"https://github.com/bazelbuild/rules_pkg/releases/download/0.10.1/rules_pkg-0.10.1.tar.gz",
],
sha256 = "d250924a2ecc5176808fc4c25d5cf5e9e79e6346d79d5ab1c493e289e722d1d0",
)
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
http_archive(
name = "com_google_protobuf",
sha256 = "d19643d265b978383352b3143f04c0641eea75a75235c111cc01a1350173180e",

View File

@ -109,7 +109,7 @@ local manifestYamlStream = function (value, indent_array_in_object=false, c_docu
image: image,
tag: tag,
};
local istio_mode = if helm_mode then false else if utils.local_image then false else true;
local istio_mode = true; #if helm_mode then false else if utils.local_image then false else true;
{
[nssa]: (if helm_mode then '{{ if not .Values.serviceAccountName }}\n' else '') + manifestYamlStream([
@ -248,7 +248,7 @@ local manifestYamlStream = function (value, indent_array_in_object=false, c_docu
'prometheus.io/port': '7332',
}),
volumes: (if std.objectHas(info, 'volumes') then info.volumes(metadata) else []),
ports: std.map(function(x) x { expose: null, external: null }, ports),
ports: [utils.filterObjectFields(port, ['expose']) for port in ports],
health: if std.objectHas(info, 'health') then info.health,
env: env + (if dbname != '' && info.backend then ([dbPassEnv]) else []) + [
{
@ -258,7 +258,7 @@ local manifestYamlStream = function (value, indent_array_in_object=false, c_docu
] + [
if std.objectHas(srv, 'expose') && srv.expose then (if helm_mode then {
name: '%s_PUBLIC_URL' % [std.asciiUpper(std.strReplace(std.strReplace(srv.name, stage, ''), '-', '_'))],
value: 'https://{{ .Values.%s.ingressHost }}!!' % [srv.portName],
value: 'https://{{ .Values.%s.ingressHost }}!!' % [srv.name],
} else {
name: '%s_PUBLIC_URL' % [std.asciiUpper(std.strReplace(std.strReplace(srv.name, stage, ''), '-', '_'))],
value: 'https://%s' % mappings.get(srv.name, user),
@ -298,7 +298,7 @@ local manifestYamlStream = function (value, indent_array_in_object=false, c_docu
'konghq.com/protocols': (if helm_mode then '{{ .Values.kongProtocols | default !"%ss!" }}' else '%ss') % std.strReplace(std.strReplace(std.strReplace(srv.name, metadata.name, ''), stage, ''), '-', ''),
}
},
host=if helm_mode then '{{ .Values.%s.ingressHost }}' % srv.portName else mappings.get(srv.name, user),
host=if helm_mode then '{{ .Values.%s.ingressHost }}' % srv.name else mappings.get(srv.name, user),
port=srv.port,
srvName=srv.name + '-service',
) else null for srv in services]) +

View File

@ -18,4 +18,23 @@ local stage_no_dash = std.strReplace(stage, '-', '');
stage: stage,
user: user,
stage_no_dash: stage_no_dash,
// Function to filter an object by excluding specified fields.
// Declared with '::' so it is a hidden field: it is callable by other
// templates but is not emitted in the rendered JSON/YAML output.
// Parameters:
// - inputObject: The object to be filtered.
// - fieldsToIgnore: List of fields to be ignored from the input object.
// Returns: a new object containing every visible field of inputObject
// whose name is not listed in fieldsToIgnore (the input is not mutated).
filterObjectFields(inputObject, fieldsToIgnore)::
// Iterating over the fields in the input object and creating a new object
// without the fields specified in `fieldsToIgnore`.
std.foldl(function(filteredObject, currentField)
// If current field is in `fieldsToIgnore`, return the filtered object as is.
// Otherwise, add the current field to the filtered object.
(
if std.member(fieldsToIgnore, currentField) then
filteredObject
else
filteredObject + { [currentField]: inputObject[currentField] }
),
// Starting with an empty object and iterating over each field in the input object.
// NOTE(review): std.objectFields only returns visible (non-hidden) fields,
// so hidden fields of inputObject are dropped — presumably intended here.
std.objectFields(inputObject), {}),
}

View File

@ -91,7 +91,7 @@ export default async function (opts) {
opts.issuerBaseURL.endsWith('.localhost/')) &&
process.env['RESF_ENV']
) {
const kong = 'kong-proxy.kong.svc.cluster.local';
const kong = 'istio-ingressgateway.istio-system.svc.cluster.local';
const urlObject = new URL(opts.issuerBaseURL);
console.warn(`Forcing ${urlObject.hostname} to resolve to ${kong}`);
const lookup = async () => {

18
hack/k3s/pod.yaml Normal file
View File

@ -0,0 +1,18 @@
# Smoke-test pod for the k3s local-path storage provisioner: mounts the
# `local-path-pvc` claim (defined in hack/k3s/pvc.yaml) at /data inside an
# nginx container. If this pod reaches Running, dynamic PV provisioning works.
apiVersion: v1
kind: Pod
metadata:
name: volume-test
spec:
containers:
- name: volume-test
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- name: volv
mountPath: /data
ports:
- containerPort: 80
volumes:
- name: volv
persistentVolumeClaim:
claimName: local-path-pvc

12
hack/k3s/pvc.yaml Normal file
View File

@ -0,0 +1,12 @@
# PersistentVolumeClaim consumed by the volume-test pod (hack/k3s/pod.yaml).
# Requests a 10Gi ReadWriteOnce volume from the k3s `local-path`
# StorageClass, which provisions node-local host-path storage on demand.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: local-path-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 10Gi

View File

@ -35,6 +35,7 @@
trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
USER="$(whoami)"
kubectl -n "$USER-dev" port-forward svc/spicedb-dev-grpc-dev-service 50051 &
kubectl -n "$USER-dev" port-forward svc/hydra-admin-dev-http-dev-service 4445 &
kubectl -n "$USER-dev" port-forward svc/hydra-public-dev-http-dev-service 4444
kubectl -n "$USER-dev" port-forward svc/spicedb-grpc-dev-service 50051 &
kubectl -n "$USER-dev" port-forward svc/hydra-admin-http-dev-service 4445 &
kubectl -n "$USER-dev" port-forward svc/hydra-public-http-dev-service 4444 &
sudo kubectl --kubeconfig $KUBECONFIG -n "istio-system" port-forward --address 0.0.0.0 deployment/istio-ingressgateway 80:8080 443:8443

View File

@ -43,19 +43,20 @@ fi
cd "$DIR_TO_TEMPORAL" || exit 1
export SQL_PLUGIN=postgres
export SQL_PLUGIN=postgres12
export SQL_HOST=localhost
export SQL_PORT=$POSTGRES_PORT
export SQL_USER=postgres
export SQL_PASSWORD=postgres
./temporal-sql-tool create-database -database temporal
SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/postgresql/v96/temporal/versioned
./temporal-sql-tool -database temporal create-database
./temporal-sql-tool -database temporal_visibility create-database
SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/postgresql/v12/temporal/versioned
./temporal-sql-tool create-database -database temporal_visibility
SQL_DATABASE=temporal_visibility ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/postgresql/v96/visibility/versioned
SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/postgresql/v12/visibility/versioned
sleep 10

View File

@ -33,6 +33,7 @@
# TODO: Setup an bash error trap, to make errors from failing commands more
# visible and draw attention of the user to these errors.
source .envrc.local
# Install postgres
helm repo add bitnami https://charts.bitnami.com/bitnami
@ -45,8 +46,7 @@ helm repo add bitnami https://charts.bitnami.com/bitnami
# and places a PersistentVolumeClaim for this.
# Ensure that the cluster provides PersistentVolumes:
if kubectl get PersistentVolume -o json
| jq -e '.items | .[].status | select(.phase == "Bound")' ; then
if kubectl get PersistentVolume -o json | jq -e '.items | .[].status | select(.phase == "Bound")' ; then
echo "Ok found at least one PersistentVolume"
else
echo "The postgresql helm chart has a PersistentVolumeClaim (PVC)."

View File

@ -36,6 +36,7 @@ resfdeploy.new({
name: 'http',
containerPort: 4445,
protocol: 'TCP',
expose: true,
},
],
health: {

View File

@ -5,7 +5,7 @@ local utils = import 'ci/utils.jsonnet';
local tag = std.extVar('tag');
local DSN = db.dsn('hydra');
local authn = if kubernetes.prod() then 'https://id.build.resf.org' else 'http://obsidian.pdot.localhost:16000';
local authn = if kubernetes.prod() then 'https://id.build.resf.org' else 'https://id-dev.internal.pdev.resf.localhost';
{
image: 'oryd/hydra',

View File

@ -49,7 +49,7 @@ export function hydraPublicUrl() {
if (process.env['HYDRA_PUBLIC_URL']) {
return process.env['HYDRA_PUBLIC_URL'];
}
return 'https://hdr-dev.internal.rdev.ciq.localhost';
return 'https://hdr-dev.internal.pdev.resf.localhost';
}
const svc = svcNameHttp('hydra-public');
return endpointHttp(svc, NS('hydra-public'), ':4444');
@ -59,7 +59,7 @@ export function hydraPublicUrl() {
function hydraAdminUrl() {
return envOverridable('hydra_admin', 'http', () => {
if (!process.env['RESF_ENV']) {
return 'https://hdr-admin-dev.internal.rdev.ciq.localhost';
return 'https://hdr-admin-dev.internal.pdev.resf.localhost';
}
const svc = svcNameHttp('hydra-admin');
return endpointHttp(svc, NS('hydra-admin'), ':4445');

View File

@ -30,7 +30,7 @@ dependencies:
condition: prometheus.enabled
- name: elasticsearch
repository: https://helm.elastic.co
version: 7.16.3
version: 7.17.3
condition: elasticsearch.enabled
- name: grafana
repository: https://grafana.github.io/helm-charts
@ -49,8 +49,8 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.15.2
version: 0.33.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.15.2
appVersion: 1.22.4

View File

@ -1,23 +0,0 @@
The MIT License
Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
Copyright (c) 2020 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,549 +0,0 @@
# Temporal Helm Chart
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts?ref=badge_shield)
Temporal is a distributed, scalable, durable, and highly available orchestration engine designed to execute asynchronous long-running business logic in a resilient way.
This repo contains a basic V3 [Helm](https://helm.sh) chart that deploys Temporal to a Kubernetes cluster. The dependencies that are bundled with this solution by default offer an easy way to experiment with Temporal software. This Helm chart can also be used to install just the Temporal server, configured to connect to dependencies (such as a Cassandra, MySQL, or PostgreSQL database) that you may already have available in your environment.
**We do not recommend using Helm for managing Temporal deployments in production**. Rather, we recommend it for templating/generating manifests for Temporal's internal services only. [See our recent discussion on this topic](https://docs.temporal.io/blog/temporal-and-kubernetes/).
This Helm Chart code is tested by a dedicated test pipeline. It is also used extensively by other Temporal pipelines for testing various aspects of Temporal systems. Our test pipeline currently uses Helm 3.1.1.
# Install Temporal service on a Kubernetes cluster
## Prerequisites
This sequence assumes
* that your system is configured to access a kubernetes cluster (e. g. [AWS EKS](https://aws.amazon.com/eks/), [kind](https://kind.sigs.k8s.io/), or [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)), and
* that your machine has
- [AWS CLI V2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html),
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), and
- [Helm v3](https://helm.sh)
installed and able to access your cluster.
## Download Helm Chart Dependencies
Download Helm dependencies:
```bash
~/temporal-helm$ helm dependencies update
```
## Install Temporal with Helm Chart
Temporal can be configured to run with various dependencies. The default "Batteries Included" Helm Chart configuration deploys and configures the following components:
* Cassandra
* ElasticSearch
* Prometheus
* Grafana
The sections that follow describe various deployment configurations, from a minimal one-replica installation using included dependencies, to a replicated deployment on existing infrastructure.
### Minimal installation with required dependencies only
To install Temporal in a limited but working and self-contained configuration (one replica of Cassandra and each of Temporal's services, no metrics or ElasticSearch), you can run the following command
```
~/temporal-helm$ helm install \
--set server.replicaCount=1 \
--set cassandra.config.cluster_size=1 \
--set prometheus.enabled=false \
--set grafana.enabled=false \
--set elasticsearch.enabled=false \
temporaltest . --timeout 15m
```
This configuration consumes limited resources and it is useful for small scale tests (such as using minikube).
Below is an example of an environment installed in this configuration:
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
temporaltest-admintools-6cdf56b869-xdxz2 1/1 Running 0 11m
temporaltest-cassandra-0 1/1 Running 0 11m
temporaltest-frontend-5d5b6d9c59-v9g5j 1/1 Running 2 11m
temporaltest-history-64b9ddbc4b-bwk6j 1/1 Running 2 11m
temporaltest-matching-c8887ddc4-jnzg2 1/1 Running 2 11m
temporaltest-metrics-server-7fbbf65cff-rp2ks 1/1 Running 0 11m
temporaltest-web-77f68bff76-ndkzf 1/1 Running 0 11m
temporaltest-worker-7c9d68f4cf-8tzfw 1/1 Running 2 11m
```
### Install with required and optional dependencies
This method requires a three node kubernetes cluster to successfully bring up all the dependencies.
By default, Temporal Helm Chart configures Temporal to run with a three node Cassandra cluster (for persistence) and Elasticsearch (for "visibility" features), Prometheus, and Grafana. By default, Temporal Helm Chart installs all dependencies, out of the box.
To install Temporal with all of its dependencies run this command:
```bash
~/temporal-helm$ helm install temporaltest . --timeout 900s
```
To use your own instance of ElasticSearch, MySQL, PostgreSQL, or Cassandra, please read the "Bring Your Own" sections below.
Other components (Prometheus, Grafana) can be omitted from the installation by setting their corresponding `enable` flag to `false`:
```bash
~/temporal-helm$ helm install \
--set prometheus.enabled=false \
--set grafana.enabled=false \
temporaltest . --timeout 900s
```
### Install with sidecar containers
You may need to provide your own sidecar containers.
To do so, you may look at the example for Google's `cloud sql proxy` in the `values/values.cloudsqlproxy.yaml` and pass that file to `helm install`.
Example:
```bash
~/temporal-helm$ helm install -f values/values.cloudsqlproxy.yaml temporaltest . --timeout 900s
```
### Install with your own ElasticSearch
You might already be operating an instance of ElasticSearch that you want to use with Temporal.
To do so, fill in the relevant configuration values in `values.elasticsearch.yaml`, and pass the file to 'helm install'.
Example:
```bash
~/temporal-helm$ helm install -f values/values.elasticsearch.yaml temporaltest . --timeout 900s
```
### Install with your own MySQL
You might already be operating a MySQL instance that you want to use with Temporal.
In this case, create and configure temporal databases on your MySQL host with `temporal-sql-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo.
Here are examples of commands you can use to create and initialize the databases:
```bash
# in https://github.com/temporalio/temporal git repo dir
export SQL_PLUGIN=mysql
export SQL_HOST=mysql_host
export SQL_PORT=3306
export SQL_USER=mysql_user
export SQL_PASSWORD=mysql_password
./temporal-sql-tool create-database -database temporal
SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/mysql/v57/temporal/versioned
./temporal-sql-tool create-database -database temporal_visibility
SQL_DATABASE=temporal_visibility ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/mysql/v57/visibility/versioned
```
Once you initialized the two databases, fill in the configuration values in `values/values.mysql.yaml`, and run
```bash
# in https://github.com/temporalio/helm-charts git repo dir
helm install -f values/values.mysql.yaml temporaltest . --timeout 900s
```
Alternatively, instead of modifying `values/values.mysql.yaml`, you can supply those values in your command line:
```bash
# in https://github.com/temporalio/helm-charts git repo dir
helm install -f values/values.mysql.yaml temporaltest \
--set elasticsearch.enabled=false \
--set server.config.persistence.default.sql.user=mysql_user \
--set server.config.persistence.default.sql.password=mysql_password \
--set server.config.persistence.visibility.sql.user=mysql_user \
--set server.config.persistence.visibility.sql.password=mysql_password \
--set server.config.persistence.default.sql.host=mysql_host \
--set server.config.persistence.visibility.sql.host=mysql_host . --timeout 900s
```
*NOTE:* For MYSQL <5.7.20 (e.g AWS Aurora MySQL) use `values/values.aurora-mysql.yaml`
### Install with your own PostgreSQL
You might already be operating a PostgreSQL instance that you want to use with Temporal.
In this case, create and configure temporal databases on your PostgreSQL host with `temporal-sql-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo.
Here are examples of commands you can use to create and initialize the databases:
```bash
# in https://github.com/temporalio/temporal git repo dir
export SQL_PLUGIN=postgres
export SQL_HOST=postgresql_host
export SQL_PORT=5432
export SQL_USER=postgresql_user
export SQL_PASSWORD=postgresql_password
./temporal-sql-tool create-database -database temporal
SQL_DATABASE=temporal ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal ./temporal-sql-tool update -schema-dir schema/postgresql/v96/temporal/versioned
./temporal-sql-tool create-database -database temporal_visibility
SQL_DATABASE=temporal_visibility ./temporal-sql-tool setup-schema -v 0.0
SQL_DATABASE=temporal_visibility ./temporal-sql-tool update -schema-dir schema/postgresql/v96/visibility/versioned
```
Once you initialized the two databases, fill in the configuration values in `values/values.postgresql.yaml`, and run
```bash
# in https://github.com/temporalio/helm-charts git repo dir
helm install -f values/values.postgresql.yaml temporaltest . --timeout 900s
```
Alternatively, instead of modifying `values/values.postgresql.yaml`, you can supply those values in your command line:
```bash
# in https://github.com/temporalio/helm-charts git repo dir
helm install -f values/values.postgresql.yaml temporaltest \
--set elasticsearch.enabled=false \
--set server.config.persistence.default.sql.user=postgresql_user \
--set server.config.persistence.default.sql.password=postgresql_password \
--set server.config.persistence.visibility.sql.user=postgresql_user \
--set server.config.persistence.visibility.sql.password=postgresql_password \
--set server.config.persistence.default.sql.host=postgresql_host \
--set server.config.persistence.visibility.sql.host=postgresql_host . --timeout 900s
```
### Install with your own Cassandra
You might already be operating a Cassandra instance that you want to use with Temporal.
In this case, create and setup keyspaces in your Cassandra instance with `temporal-cassandra-tool`. The tool is part of [temporal repo](https://github.com/temporalio/temporal), and it relies on the schema definition, in the same repo.
Here are examples of commands you can use to create and initialize the keyspaces:
```bash
# in https://github.com/temporalio/temporal git repo dir
export CASSANDRA_HOST=cassandra_host
export CASSANDRA_PORT=9042
export CASSANDRA_USER=cassandra_user
export CASSANDRA_PASSWORD=cassandra_user_password
./temporal-cassandra-tool create-Keyspace -k temporal
CASSANDRA_KEYSPACE=temporal ./temporal-cassandra-tool setup-schema -v 0.0
CASSANDRA_KEYSPACE=temporal ./temporal-cassandra-tool update -schema-dir schema/cassandra/temporal/versioned
./temporal-cassandra-tool create-Keyspace -k temporal_visibility
CASSANDRA_KEYSPACE=temporal_visibility ./temporal-cassandra-tool setup-schema  -v 0.0
CASSANDRA_KEYSPACE=temporal_visibility ./temporal-cassandra-tool update -schema-dir schema/cassandra/visibility/versioned
```
Once you initialized the two keyspaces, fill in the configuration values in `values/values.cassandra.yaml`, and run
```bash
~/temporal-helm$ helm install -f values/values.cassandra.yaml temporaltest . --timeout 900s
```
### Install and configure Temporal
If a live application environment already uses systems that Temporal can use as dependencies, then those systems can continue to be used. This Helm chart can install the minimal pieces of Temporal such that it can then be configured to use those systems as its dependencies.
The example below demonstrates a few things:
1. How to set values via the command line rather than the environment.
2. How to configure a database (shows Cassandra, but MySQL works the same way)
3. How to enable TLS for the database connection.
4. How to enable Auth for the Web UI
```bash
helm install temporaltest \
-f values/values.cassandra.yaml \
-f values/values.elasticsearch.yaml \
--set grafana.enabled=false \
--set prometheus.enabled=false \
--set server.replicaCount=5 \
--set server.config.persistence.default.cassandra.hosts=cassandra.data.host.example \
--set server.config.persistence.default.cassandra.user=cassandra_user \
--set server.config.persistence.default.cassandra.password=cassandra_user_password \
--set server.config.persistence.default.cassandra.tls.caData=$(base64 --wrap=0 cassandra.ca.pem) \
--set server.config.persistence.default.cassandra.tls.enabled=true \
--set server.config.persistence.default.cassandra.replicationFactor=3 \
--set server.config.persistence.default.cassandra.keyspace=temporal \
--set server.config.persistence.visibility.cassandra.hosts=cassandra.vis.host.example \
--set server.config.persistence.visibility.cassandra.user=cassandra_user_vis \
--set server.config.persistence.visibility.cassandra.password=cassandra_user_vis_password \
--set server.config.persistence.visibility.cassandra.tls.caData=$(base64 --wrap=0 cassandra.ca.pem) \
--set server.config.persistence.visibility.cassandra.tls.enabled=true \
--set server.config.persistence.visibility.cassandra.replicationFactor=3 \
--set server.config.persistence.visibility.cassandra.keyspace=temporal_visibility \
. \
--timeout 15m \
--wait
```
## Play With It
### Exploring Your Cluster
You can use your favorite kubernetes tools ([k9s](https://github.com/derailed/k9s), [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), etc.) to interact with your cluster.
```bash
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
...
temporaltest-admintools ClusterIP 172.20.237.59 <none> 22/TCP 15m
temporaltest-frontend-headless ClusterIP None <none> 7233/TCP,9090/TCP 15m
temporaltest-history-headless ClusterIP None <none> 7234/TCP,9090/TCP 15m
temporaltest-matching-headless ClusterIP None <none> 7235/TCP,9090/TCP 15m
temporaltest-worker-headless ClusterIP None <none> 7239/TCP,9090/TCP 15m
...
```
```
$ kubectl get pods
...
temporaltest-admintools-7b6c599855-8bk4x 1/1 Running 0 25m
temporaltest-frontend-54d94fdcc4-bx89b 1/1 Running 2 25m
temporaltest-history-86d8d7869-lzb6f 1/1 Running 2 25m
temporaltest-matching-6c7d6d7489-kj5pj 1/1 Running 3 25m
temporaltest-worker-769b996fd-qmvbw 1/1 Running 2 25m
...
```
### Running Temporal CLI From the Admin Tools Container
You can also shell into `admin-tools` container via [k9s](https://github.com/derailed/k9s) or by running
```
$ kubectl exec -it services/temporaltest-admintools /bin/bash
bash-5.0#
```
and run Temporal CLI from there:
```
bash-5.0# tctl namespace list
Name: temporal-system
Id: 32049b68-7872-4094-8e63-d0dd59896a83
Description: Temporal internal system namespace
OwnerEmail: temporal-core@temporal.io
NamespaceData: map[string]string(nil)
Status: Registered
RetentionInDays: 7
EmitMetrics: true
ActiveClusterName: active
Clusters: active
HistoryArchivalStatus: Disabled
VisibilityArchivalStatus: Disabled
Bad binaries to reset:
+-----------------+----------+------------+--------+
| BINARY CHECKSUM | OPERATOR | START TIME | REASON |
+-----------------+----------+------------+--------+
+-----------------+----------+------------+--------+
```
```
bash-5.0# tctl --namespace nonesuch namespace desc
Error: Namespace nonesuch does not exist.
Error Details: Namespace nonesuch does not exist.
```
```
bash-5.0# tctl --namespace nonesuch namespace re
Namespace nonesuch successfully registered.
```
```
bash-5.0# tctl --namespace nonesuch namespace desc
Name: nonesuch
UUID: 465bb575-8c01-43f8-a67d-d676e1ae5eae
Description:
OwnerEmail:
NamespaceData: map[string]string(nil)
Status: Registered
RetentionInDays: 3
EmitMetrics: false
ActiveClusterName: active
Clusters: active
HistoryArchivalStatus: ArchivalStatusDisabled
VisibilityArchivalStatus: ArchivalStatusDisabled
Bad binaries to reset:
+-----------------+----------+------------+--------+
| BINARY CHECKSUM | OPERATOR | START TIME | REASON |
+-----------------+----------+------------+--------+
+-----------------+----------+------------+--------+
```
### Forwarding Your Machine's Local Port to Temporal FrontEnd
You can also expose your instance's front end port on your local machine:
```
$ kubectl port-forward services/temporaltest-frontend-headless 7233:7233
Forwarding from 127.0.0.1:7233 -> 7233
Forwarding from [::1]:7233 -> 7233
```
and, from a separate window, use the local port to access the service from your application or Temporal samples.
### Forwarding Your Machine's Local Port to Temporal Web UI
Similarly to how you accessed Temporal front end via kubernetes port forwarding, you can access your Temporal instance's web user interface.
To do so, forward your machine's local port to the Web service in your Temporal installation
```
$ kubectl port-forward services/temporaltest-web 8088:8088
Forwarding from 127.0.0.1:8088 -> 8088
Forwarding from [::1]:8088 -> 8088
```
and navigate to http://127.0.0.1:8088 in your browser.
### Exploring Metrics via Grafana
By default, the full "Batteries Included" configuration comes with a few Grafana dashboards.
To access those dashboards, follow the following steps:
1. Extract Grafana's `admin` password from your installation:
```
$ kubectl get secret --namespace default temporaltest-grafana -o jsonpath="{.data.admin-password}" | base64 --decode
t7EqZQpiB6BztZV321dEDppXbeisdpiEAMgnu6yy%
```
2. Setup port forwarding, so you can access Grafana from your host:
```
$ kubectl port-forward services/temporaltest-grafana 8081:80
Forwarding from 127.0.0.1:8081 -> 3000
Forwarding from [::1]:8081 -> 3000
...
```
3. Navigate to the forwarded Grafana port in your browser (http://localhost:8081/), login as `admin` (using the password from step 1), and click on the "Home" button (upper left corner) to see available dashboards.
### Updating Dynamic Configs
By default dynamic config is empty, if you want to override some properties for your cluster, you should:
1. Create a yaml file with your config (for example dc.yaml).
2. Populate it with some values under server.dynamicConfig prefix (use the sample provided at `values/values.dynamic_config.yaml` as a starting point)
3. Install your helm configuration:
```bash
$ helm install -f values/values.dynamic_config.yaml temporaltest . --timeout 900s
```
Note that if you already have a running cluster you can use the "helm upgrade" command to change dynamic config values:
```bash
$ helm upgrade -f values/values.dynamic_config.yaml temporaltest . --timeout 900s
```
WARNING: The "helm upgrade" approach will trigger a rolling upgrade of all the pods.
If a rolling upgrade is not desirable, you can also generate the ConfigMap file explicitly and then apply it using the following command:
```bash
$ kubectl apply -f dynamicconfigmap.yaml
```
You can use helm upgrade with the "--dry-run" option to generate the content for the dynamicconfigmap.yaml.
The dynamic-config ConfigMap is referenced as a mounted volume within the Temporal Containers, so any applied change will be automatically picked up by all pods within a few minutes without the need for pod recycling. See k8S documentation (https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#mounted-configmaps-are-updated-automatically) for more details on how this works.
### Updating Temporal Web Config
the config file `server/config.yml` for the temporal web ui is referenced as a mounted volume within the Temporal Web UI Container and can be populated by inserting values in the `web.config` section in the `values.yml` for possible config check (https://github.com/temporalio/web#configuring-authentication-optional)
## Uninstalling
Note: in this example chart, uninstalling a Temporal instance also removes all the data that might have been created during its lifetime.
```bash
~/temporal-helm $ helm uninstall temporaltest
```
## Upgrading
To upgrade your cluster, upgrade your database schema (if the release includes schema changes), and then use `helm upgrade` command to perform a rolling upgrade of your installation.
Note:
* Not supported: running newer binaries with an older schema.
* Supported: downgrading binaries, i.e. running older binaries with a newer schema.
Example:
### Upgrade Schema
Here are examples of commands you can use to upgrade the "default" and "visibility" schemas in your "bring your own" Cassandra database.
Upgrade default schema:
```
temporal_v1.2.1 $ temporal-cassandra-tool \
--tls \
--tls-ca-file ... \
--user cassandra-user \
--password cassandra-password \
--endpoint cassandra.example.com \
--keyspace temporal \
--timeout 120 \
update \
--schema-dir ./schema/cassandra/temporal/versioned
```
Upgrade visibility schema:
```
temporal_v1.2.1 $ temporal-cassandra-tool \
--tls \
--tls-ca-file ... \
--user cassandra-user \
--password cassandra-password \
--endpoint cassandra.example.com \
--keyspace temporal_visibility \
--timeout 120 \
update \
--schema-dir ./schema/cassandra/visibility/versioned
```
To upgrade your MySQL database, please use `temporal-sql-tool` tool instead of `temporal-cassandra-tool`.
### Upgrade Temporal Instance's Docker Images
Here is an example of a `helm upgrade` command that can be used to upgrade a cluster:
```
helm-charts $ helm \
upgrade \
temporaltest \
-f values/values.cassandra.yaml \
--set elasticsearch.enabled=true \
--set server.replicaCount=8 \
--set server.config.persistence.default.cassandra.hosts='{c1.example.com,c2.example.com,c3.example.com}' \
--set server.config.persistence.default.cassandra.user=cassandra-user \
--set server.config.persistence.default.cassandra.password=cassandra-password \
--set server.config.persistence.default.cassandra.tls.caData=... \
--set server.config.persistence.default.cassandra.tls.enabled=true \
--set server.config.persistence.default.cassandra.replicationFactor=3 \
--set server.config.persistence.default.cassandra.keyspace=temporal \
--set server.config.persistence.visibility.cassandra.hosts='{c1.example.com,c2.example.com,c3.example.com}' \
--set server.config.persistence.visibility.cassandra.user=cassandra-user \
--set server.config.persistence.visibility.cassandra.password=cassandra-password \
--set server.config.persistence.visibility.cassandra.tls.caData=... \
--set server.config.persistence.visibility.cassandra.tls.enabled=true \
--set server.config.persistence.visibility.cassandra.replicationFactor=3 \
--set server.config.persistence.visibility.cassandra.keyspace=temporal_visibility \
--set server.image.tag=1.2.1 \
--set server.image.repository=temporalio/server \
--set admintools.image.tag=1.2.1 \
--set admintools.image.repository=temporalio/admin-tools \
--set web.image.tag=1.1.1 \
--set web.image.repository=temporalio/web \
. \
--wait \
--timeout 15m
```
# Acknowledgements
Many thanks to [Banzai Cloud](https://github.com/banzaicloud) whose [Cadence Helm Charts](https://github.com/banzaicloud/banzai-charts/tree/master/cadence) heavily inspired this work.
## License
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Ftemporalio%2Ftemporal-helm-charts?ref=badge_large)

View File

@ -206,6 +206,17 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- print "password" -}}
{{- end -}}
{{- define "temporal.persistence.sql.database" -}}
{{- $global := index . 0 -}}
{{- $store := index . 1 -}}
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if $storeConfig.sql.database -}}
{{- $storeConfig.sql.database -}}
{{- else -}}
{{- required (printf "Please specify database for %s store" $store) $storeConfig.sql.database -}}
{{- end -}}
{{- end -}}
{{- define "temporal.persistence.sql.driver" -}}
{{- $global := index . 0 -}}
{{- $store := index . 1 -}}
@ -227,9 +238,9 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if $storeConfig.sql.host -}}
{{- $storeConfig.sql.host -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- include "mysql.host" $global -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- include "postgresql.host" $global -}}
{{- else -}}
{{- required (printf "Please specify sql host for %s store" $store) $storeConfig.sql.host -}}
@ -242,9 +253,9 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if $storeConfig.sql.port -}}
{{- $storeConfig.sql.port -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- $global.Values.mysql.service.port -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- $global.Values.postgresql.service.port -}}
{{- else -}}
{{- required (printf "Please specify sql port for %s store" $store) $storeConfig.sql.port -}}
@ -257,9 +268,9 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if $storeConfig.sql.user -}}
{{- $storeConfig.sql.user -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- $global.Values.mysql.mysqlUser -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- $global.Values.postgresql.postgresqlUser -}}
{{- else -}}
{{- required (printf "Please specify sql user for %s store" $store) $storeConfig.sql.user -}}
@ -272,13 +283,13 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if $storeConfig.sql.password -}}
{{- $storeConfig.sql.password -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- if or $global.Values.schema.setup.enabled $global.Values.schema.update.enabled -}}
{{- required "Please specify password for MySQL chart" $global.Values.mysql.mysqlPassword -}}
{{- else -}}
{{- $global.Values.mysql.mysqlPassword -}}
{{- end -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- if or $global.Values.schema.setup.enabled $global.Values.schema.update.enabled -}}
{{- required "Please specify password for PostgreSQL chart" $global.Values.postgresql.postgresqlPassword -}}
{{- else -}}
@ -297,9 +308,9 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig.sql.existingSecret -}}
{{- else if $storeConfig.sql.password -}}
{{- include "temporal.componentname" (list $global (printf "%s-store" $store)) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- include "call-nested" (list $global "mysql" "mysql.secretName") -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- include "call-nested" (list $global "postgresql" "postgresql.secretName") -}}
{{- else -}}
{{- required (printf "Please specify sql password or existing secret for %s store" $store) $storeConfig.sql.existingSecret -}}
@ -312,9 +323,9 @@ Source: https://stackoverflow.com/a/52024583/3027614
{{- $storeConfig := index $global.Values.server.config.persistence $store -}}
{{- if or $storeConfig.sql.existingSecret $storeConfig.sql.password -}}
{{- print "password" -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql")) -}}
{{- else if and $global.Values.mysql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "mysql8")) -}}
{{- print "mysql-password" -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres")) -}}
{{- else if and $global.Values.postgresql.enabled (and (eq (include "temporal.persistence.driver" (list $global $store)) "sql") (eq (include "temporal.persistence.sql.driver" (list $global $store)) "postgres12")) -}}
{{- print "postgresql-password" -}}
{{- else -}}
{{- fail (printf "Please specify sql password or existing secret for %s store" $store) -}}

View File

@ -28,6 +28,13 @@ spec:
app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }}
app.kubernetes.io/component: admintools
app.kubernetes.io/part-of: {{ .Chart.Name }}
{{- with $.Values.admintools.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $.Values.admintools.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{ include "temporal.serviceAccount" . }}
containers:
@ -39,8 +46,14 @@ spec:
containerPort: 22
protocol: TCP
env:
# TEMPORAL_CLI_ADDRESS is deprecated, use TEMPORAL_ADDRESS instead
- name: TEMPORAL_CLI_ADDRESS
value: {{ include "temporal.fullname" . }}-frontend:{{ include "temporal.frontend.grpcPort" . }}
- name: TEMPORAL_ADDRESS
value: {{ include "temporal.fullname" . }}-frontend:{{ include "temporal.frontend.grpcPort" . }}
{{- if .Values.admintools.additionalEnv }}
{{- toYaml .Values.admintools.additionalEnv | nindent 12 }}
{{- end }}
livenessProbe:
exec:
command:
@ -48,6 +61,18 @@ spec:
- /
initialDelaySeconds: 5
periodSeconds: 5
{{- with .Values.admintools.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.admintools.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.admintools.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}

View File

@ -19,9 +19,10 @@ data:
persistence:
defaultStore: {{ $.Values.server.config.persistence.defaultStore }}
visibilityStore: visibility
{{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }}
advancedVisibilityStore: es-visibility
visibilityStore: es-visibility
{{- else }}
visibilityStore: visibility
{{- end }}
numHistoryShards: {{ $.Values.server.config.numHistoryShards }}
datastores:
@ -38,11 +39,9 @@ data:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if $.Values.server.config.persistence.faultinjection}}
{{- if $.Values.server.config.persistence.faultinjection.rate }}
{{- with $.Values.server.config.persistence.default.faultInjection}}
faultInjection:
rate: {{ $.Values.server.config.persistence.faultinjection.rate }}
{{- end }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if eq (include "temporal.persistence.driver" (list $ "default")) "sql" }}
sql:
@ -58,15 +57,6 @@ data:
{{- end }}
{{- end }}
visibility:
{{- if eq (include "temporal.persistence.driver" (list $ "visibility")) "cassandra" }}
cassandra:
hosts: "{{ include "temporal.persistence.cassandra.hosts" (list $ "visibility") }}"
port: {{ include "temporal.persistence.cassandra.port" (list $ "visibility") }}
password: "{{ `{{ .Env.TEMPORAL_VISIBILITY_STORE_PASSWORD }}` }}"
{{- with (omit $.Values.server.config.persistence.visibility.cassandra "hosts" "port" "password" "existingSecret") }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if eq (include "temporal.persistence.driver" (list $ "default")) "sql" }}
sql:
pluginName: "{{ include "temporal.persistence.sql.driver" (list $ "visibility") }}"
@ -103,14 +93,30 @@ data:
pprof:
port: 7936
metrics:
tags:
type: {{ $service }}
{{- with $.Values.server.metrics.tags }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if $.Values.server.config.prometheus }}
prometheus:
{{- with $.Values.server.config.prometheus }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- else }}
prometheus:
timerType: histogram
listenAddress: "0.0.0.0:9090"
{{- end }}
{{- if $.Values.server.config.tls }}
tls:
{{- with $.Values.server.config.tls }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
services:
frontend:
@ -156,12 +162,35 @@ data:
rpcAddress: "127.0.0.1:7933"
{{- end }}
{{- if $.Values.server.config.dcRedirectionPolicy }}
dcRedirectionPolicy:
{{- with $.Values.server.config.dcRedirectionPolicy }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- else }}
dcRedirectionPolicy:
policy: "noop"
toDC: ""
{{- end }}
{{- if $.Values.server.archival }}
archival:
{{- with $.Values.server.archival }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- else }}
archival:
status: "disabled"
{{- end }}
{{- if $.Values.server.namespaceDefaults }}
namespaceDefaults:
{{- with $.Values.server.namespaceDefaults }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- else }}
{{- end }}
publicClient:
hostPort: "{{ include "temporal.componentname" (list $ "frontend") }}:{{ $.Values.server.frontend.service.port }}"

View File

@ -30,6 +30,9 @@ spec:
app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }}
app.kubernetes.io/component: {{ $service }}
app.kubernetes.io/part-of: {{ $.Chart.Name }}
{{- with (default $.Values.server.podLabels $serviceValues.podLabels) }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/server-configmap.yaml") $ | sha256sum }}
{{- if (default $.Values.server.metrics.annotations.enabled $serviceValues.metrics.annotations.enabled) }}
@ -44,9 +47,10 @@ spec:
{{ include "temporal.serviceAccount" $ }}
{{- if or $.Values.cassandra.enabled (or $.Values.elasticsearch.enabled $.Values.elasticsearch.external)}}
{{- if semverCompare ">=1.13.0" $.Chart.AppVersion}}
{{- with $.Values.server.securityContext }}
securityContext:
fsGroup: 1000 #temporal group
runAsUser: 1000 #temporal user
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
initContainers:
{{- if $.Values.cassandra.enabled }}
@ -61,16 +65,12 @@ spec:
image: "{{ $.Values.cassandra.image.repo }}:{{ $.Values.cassandra.image.tag }}"
imagePullPolicy: {{ $.Values.cassandra.image.pullPolicy }}
command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ $.Values.cassandra.config.ports.cql }} -e "SELECT keyspace_name FROM system_schema.keyspaces" | grep {{ $.Values.server.config.persistence.default.cassandra.keyspace }}$; do echo waiting for default keyspace to become ready; sleep 1; done;']
- name: check-cassandra-visibility-schema
image: "{{ $.Values.cassandra.image.repo }}:{{ $.Values.cassandra.image.tag }}"
imagePullPolicy: {{ $.Values.cassandra.image.pullPolicy }}
command: ['sh', '-c', 'until cqlsh {{ include "cassandra.host" $ }} {{ $.Values.cassandra.config.ports.cql }} -e "SELECT keyspace_name FROM system_schema.keyspaces" | grep {{ $.Values.server.config.persistence.visibility.cassandra.keyspace }}$; do echo waiting for visibility keyspace to become ready; sleep 1; done;']
{{- end }}
{{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }}
- name: check-elasticsearch-index
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ['sh', '-c', 'until curl --silent --fail --user {{ $.Values.elasticsearch.username }}:{{ $.Values.elasticsearch.password }} {{ $.Values.elasticsearch.scheme }}://{{ $.Values.elasticsearch.host }}:{{ $.Values.elasticsearch.port }}/{{ $.Values.elasticsearch.visibilityIndex }} 2>&1 > /dev/null; do echo waiting for elasticsearch index to become ready; sleep 1; done;']
command: ['sh', '-c', 'until curl --silent --fail {{- if and $.Values.elasticsearch.username $.Values.elasticsearch.password }} --user {{ $.Values.elasticsearch.username }}:{{ $.Values.elasticsearch.password }} {{- end }} {{ $.Values.elasticsearch.scheme }}://{{ $.Values.elasticsearch.host }}:{{ $.Values.elasticsearch.port }}/{{ $.Values.elasticsearch.visibilityIndex }} 2>&1 > /dev/null; do echo waiting for elasticsearch index to become ready; sleep 1; done;']
{{- end }}
{{- end }}
containers:
@ -98,6 +98,10 @@ spec:
value: "{{ $.Values.elasticsearch.username }}"
- name: ES_PWD
value: "{{ $.Values.elasticsearch.password }}"
- name: TEMPORAL_BROADCAST_ADDRESS
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICES
value: {{ $service }}
- name: TEMPORAL_STORE_PASSWORD
@ -114,6 +118,9 @@ spec:
- name: TEMPORAL_VERSION_CHECK_DISABLED
value: "1"
{{- end }}
{{- if or $.Values.server.additionalEnv $serviceValues.additionalEnv }}
{{- toYaml (default $.Values.server.additionalEnv $serviceValues.additionalEnv) | nindent 12 }}
{{- end }}
ports:
- name: rpc
containerPort: {{ include (printf "temporal.%s.grpcPort" $service) $ }}
@ -121,6 +128,9 @@ spec:
- name: metrics
containerPort: 9090
protocol: TCP
- name: membership
protocol: TCP
containerPort: {{ include (printf "temporal.%s.membershipPort" $service) $ }}
{{- if ne $service "worker"}}
livenessProbe:
initialDelaySeconds: 150
@ -134,14 +144,17 @@ spec:
- name: dynamic-config
mountPath: /etc/temporal/dynamic_config
{{- if $.Values.server.additionalVolumeMounts }}
{{- toYaml $.Values.server.additionalVolumeMounts | nindent 12}}
{{- toYaml $.Values.server.additionalVolumeMounts | nindent 12}}
{{- end }}
resources:
{{- toYaml (default $.Values.server.resources $serviceValues.resources) | nindent 12 }}
{{- with $serviceValues.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if $.Values.server.sidecarContainers }}
{{- toYaml $.Values.server.sidecarContainers | nindent 8 }}
{{- end }}
{{- with $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
@ -157,7 +170,7 @@ spec:
- key: dynamic_config.yaml
path: dynamic_config.yaml
{{- if $.Values.server.additionalVolumes }}
{{- toYaml $.Values.server.additionalVolumes | nindent 8}}
{{- toYaml $.Values.server.additionalVolumes | nindent 8}}
{{- end }}
{{- with (default $.Values.server.nodeSelector $serviceValues.nodeSelector) }}
nodeSelector:
@ -171,6 +184,10 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $serviceValues.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@ -39,7 +39,7 @@ spec:
{{ include "temporal.serviceAccount" . }}
restartPolicy: "OnFailure"
initContainers:
{{- if or .Values.cassandra.enabled (eq (include "temporal.persistence.driver" (list $ "default")) "cassandra") (eq (include "temporal.persistence.driver" (list $ "visibility")) "cassandra") }}
{{- if or .Values.cassandra.enabled (eq (include "temporal.persistence.driver" (list $ "default")) "cassandra") }}
{{- if .Values.cassandra.enabled }}
- name: check-cassandra-service
image: busybox
@ -55,7 +55,7 @@ spec:
- name: create-{{ $store }}-store
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ['sh', '-c', 'temporal-cassandra-tool create -k {{ $storeConfig.cassandra.keyspace }} --replication-factor {{ $storeConfig.cassandra.replicationFactor }}']
command: ['temporal-cassandra-tool', 'create', '-k', '{{ $storeConfig.cassandra.keyspace }}', '--replication-factor', '{{ $storeConfig.cassandra.replicationFactor }}']
env:
- name: CASSANDRA_HOST
value: {{ first (splitList "," (include "temporal.persistence.cassandra.hosts" (list $ $store))) }}
@ -80,6 +80,38 @@ spec:
{{- end }}
{{- end }}
{{- end }}
{{- else if or (eq (include "temporal.persistence.driver" (list $ "default")) "sql") (eq (include "temporal.persistence.driver" (list $ "visibility")) "sql") }}
{{- range $store := (list "default" "visibility") }}
{{- $storeConfig := index $.Values.server.config.persistence $store }}
{{- if eq (include "temporal.persistence.driver" (list $ $store)) "sql" }}
- name: create-{{ $store }}-store
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ['temporal-sql-tool', '--database', '{{ include "temporal.persistence.sql.database" (list $ $store) }}', 'create-database']
env:
- name: SQL_PLUGIN
value: {{ include "temporal.persistence.sql.driver" (list $ $store) }}
- name: SQL_HOST
value: {{ include "temporal.persistence.sql.host" (list $ $store) }}
- name: SQL_PORT
value: {{ include "temporal.persistence.sql.port" (list $ $store) | quote }}
{{- if $storeConfig.sql.user }}
- name: SQL_USER
value: {{ $storeConfig.sql.user }}
{{- end }}
{{- if (or $storeConfig.sql.password $storeConfig.sql.existingSecret) }}
- name: SQL_PASSWORD
{{- if $storeConfig.sql.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ include "temporal.persistence.secretName" (list $ $store) }}
key: {{ include "temporal.persistence.secretKey" (list $ $store) }}
{{- else }}
value: {{ $storeConfig.sql.password }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- else }}
[]
{{- end }}
@ -89,7 +121,7 @@ spec:
- name: {{ $store }}-schema
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ["temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool", "setup-schema", "-v", "0.0"]
command: ['temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool', 'setup-schema', '-v', '0.0']
env:
{{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }}
- name: CASSANDRA_HOST
@ -113,8 +145,44 @@ spec:
value: {{ $storeConfig.cassandra.password }}
{{- end }}
{{- end }}
{{- else if eq (include "temporal.persistence.driver" (list $ $store)) "sql" }}
- name: SQL_PLUGIN
value: {{ include "temporal.persistence.sql.driver" (list $ $store) }}
- name: SQL_HOST
value: {{ include "temporal.persistence.sql.host" (list $ $store) }}
- name: SQL_PORT
value: {{ include "temporal.persistence.sql.port" (list $ $store) | quote }}
- name: SQL_DATABASE
value: {{ include "temporal.persistence.sql.database" (list $ $store) }}
{{- if $storeConfig.sql.user }}
- name: SQL_USER
value: {{ $storeConfig.sql.user }}
{{- end }}
{{- if (or $storeConfig.sql.password $storeConfig.sql.existingSecret) }}
- name: SQL_PASSWORD
{{- if $storeConfig.sql.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ include "temporal.persistence.secretName" (list $ $store) }}
key: {{ include "temporal.persistence.secretKey" (list $ $store) }}
{{- else }}
value: {{ $storeConfig.sql.password }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.schema.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
@ -186,12 +254,17 @@ spec:
{{- end }}
containers:
{{- range $store := (list "default" "visibility") }}
{{- if or (eq $store "default") (eq (include "temporal.persistence.driver" (list $ $store)) "sql") }}
{{- $storeConfig := index $.Values.server.config.persistence $store }}
- name: {{ $store }}-schema
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
{{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }}
command: ['sh', '-c', 'temporal-cassandra-tool update-schema -d /etc/temporal/schema/cassandra/{{ include "temporal.persistence.schema" $store }}/versioned']
command: ['temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool', 'update-schema', '--schema-dir', '/etc/temporal/schema/cassandra/{{ include "temporal.persistence.schema" $store }}/versioned']
{{- else if eq (include "temporal.persistence.sql.driver" (list $ $store)) "mysql8" }}
command: ['temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool', 'update-schema', '--schema-dir', '/etc/temporal/schema/mysql/v8/{{ include "temporal.persistence.schema" $store }}/versioned']
{{- else if eq (include "temporal.persistence.sql.driver" (list $ $store)) "postgres12" }}
command: ['temporal-{{ include "temporal.persistence.driver" (list $ $store) }}-tool', 'update-schema', '--schema-dir', '/etc/temporal/schema/postgresql/v12/{{ include "temporal.persistence.schema" $store }}/versioned']
{{- end }}
env:
{{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }}
@ -216,8 +289,45 @@ spec:
value: {{ $storeConfig.cassandra.password }}
{{- end }}
{{- end }}
{{- else if eq (include "temporal.persistence.driver" (list $ $store)) "sql" }}
- name: SQL_PLUGIN
value: {{ include "temporal.persistence.sql.driver" (list $ $store) }}
- name: SQL_HOST
value: {{ include "temporal.persistence.sql.host" (list $ $store) }}
- name: SQL_PORT
value: {{ include "temporal.persistence.sql.port" (list $ $store) | quote }}
- name: SQL_DATABASE
value: {{ include "temporal.persistence.sql.database" (list $ $store) }}
{{- if $storeConfig.sql.user }}
- name: SQL_USER
value: {{ $storeConfig.sql.user }}
{{- end }}
{{- if (or $storeConfig.sql.password $storeConfig.sql.existingSecret) }}
- name: SQL_PASSWORD
{{- if $storeConfig.sql.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ include "temporal.persistence.secretName" (list $ $store) }}
key: {{ include "temporal.persistence.secretKey" (list $ $store) }}
{{- else }}
value: {{ $storeConfig.sql.password }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.schema.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with (default $.Values.admintools.nodeSelector) }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@ -232,7 +342,7 @@ spec:
{{- end }}
---
{{- end }}
{{- if or $.Values.elasticsearch.enabled $.Values.elasticsearch.external }}
{{- if and (or $.Values.elasticsearch.enabled $.Values.elasticsearch.external) .Values.schema.setup.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
@ -272,21 +382,30 @@ spec:
{{ include "temporal.serviceAccount" . }}
restartPolicy: "OnFailure"
initContainers:
- name: check-elasticsearch-service
image: busybox
command: ['sh', '-c', 'until nslookup {{ .Values.elasticsearch.host }}; do echo waiting for elasticsearch service; sleep 1; done;']
- name: check-elasticsearch
image: "{{ .Values.admintools.image.repository }}:{{ .Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ['sh', '-c', 'until curl --silent --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }} 2>&1 > /dev/null; do echo waiting for elasticsearch to start; sleep 1; done;']
command: ['sh', '-c', 'until curl --silent --fail {{- if and .Values.elasticsearch.username .Values.elasticsearch.password }} --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{- end }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }} 2>&1 > /dev/null; do echo waiting for elasticsearch to start; sleep 1; done;']
containers:
- name: create-elasticsearch-index
image: "{{ $.Values.admintools.image.repository }}:{{ $.Values.admintools.image.tag }}"
imagePullPolicy: {{ $.Values.admintools.image.pullPolicy }}
command: ['sh', '-c']
args:
- 'curl -X PUT --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/_template/temporal_visibility_v1_template -H "Content-Type: application/json" --data-binary "@schema/elasticsearch/visibility/index_template_{{ .Values.elasticsearch.version }}.json" 2>&1 &&
curl -X PUT --fail --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/{{ .Values.elasticsearch.visibilityIndex }} 2>&1'
- 'curl -X PUT --fail {{- if and .Values.elasticsearch.username .Values.elasticsearch.password }} --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{- end }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/_template/temporal_visibility_v1_template -H "Content-Type: application/json" --data-binary "@schema/elasticsearch/visibility/index_template_{{ .Values.elasticsearch.version }}.json" 2>&1 &&
curl -X PUT --fail {{- if and .Values.elasticsearch.username .Values.elasticsearch.password }} --user {{ .Values.elasticsearch.username }}:{{ .Values.elasticsearch.password }} {{- end }} {{ .Values.elasticsearch.scheme }}://{{ .Values.elasticsearch.host }}:{{ .Values.elasticsearch.port }}/{{ .Values.elasticsearch.visibilityIndex }} 2>&1'
{{- with .Values.schema.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.schema.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with (default $.Values.admintools.nodeSelector) }}
nodeSelector:
{{- toYaml . | nindent 8 }}

View File

@ -0,0 +1,27 @@
{{- if $.Values.server.enabled }}
{{- range $service := (list "frontend" "history" "matching" "worker") }}
{{- $serviceValues := index $.Values.server $service -}}
{{- if and (gt ($serviceValues.replicaCount | int) 1) ($serviceValues.podDisruptionBudget) }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "temporal.componentname" (list $ $service) }}-pdb
labels:
app.kubernetes.io/name: {{ include "temporal.name" $ }}
helm.sh/chart: {{ include "temporal.chart" $ }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }}
app.kubernetes.io/component: {{ $service }}
app.kubernetes.io/part-of: {{ $.Chart.Name }}
spec:
  {{- toYaml $serviceValues.podDisruptionBudget | nindent 2 }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "temporal.name" $ }}
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/component: {{ $service }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@ -16,6 +16,13 @@ metadata:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }}
app.kubernetes.io/part-of: {{ $.Chart.Name }}
{{- with $.Values.server.secretLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with $.Values.server.secretAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if eq (include "temporal.persistence.driver" (list $ $store)) "cassandra" }}

View File

@ -28,6 +28,9 @@ spec:
app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Chart.Name }}
{{- with .Values.web.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.web.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
@ -38,23 +41,37 @@ spec:
- name: {{ .Chart.Name }}-web-config
configMap:
name: {{ include "temporal.componentname" (list . "web") }}-config
{{- if .Values.web.additionalVolumes }}
{{- toYaml .Values.web.additionalVolumes | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-web
image: "{{ .Values.web.image.repository }}:{{ .Values.web.image.tag }}"
imagePullPolicy: {{ .Values.web.image.pullPolicy }}
env:
- name: TEMPORAL_GRPC_ENDPOINT
- name: TEMPORAL_ADDRESS
value: "{{ include "temporal.fullname" . }}-frontend.{{ .Release.Namespace }}.svc:{{ .Values.server.frontend.service.port }}"
volumeMounts:
- name: {{ .Chart.Name }}-web-config
mountPath: /usr/app/server/config.yml
subPath: config.yml
{{- if .Values.web.additionalEnv }}
{{- toYaml .Values.web.additionalEnv | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: 8088
containerPort: 8080
protocol: TCP
resources:
{{- toYaml .Values.web.resources | nindent 12 }}
{{- with .Values.web.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.web.additionalVolumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.web.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.web.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

View File

@ -22,6 +22,9 @@ metadata:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- with .Values.web.ingress.className }}
ingressClassName: {{ . | quote }}
{{- end }}
{{- if .Values.web.ingress.tls }}
tls:
{{- range .Values.web.ingress.tls }}

View File

@ -18,10 +18,10 @@ serviceAccount:
server:
enabled: true
sidecarContainers:
sidecarContainers: {}
image:
repository: temporalio/server
tag: 1.15.2
tag: "1.20"
pullPolicy: IfNotPresent
# Global default settings (can be overridden per service)
@ -31,6 +31,8 @@ server:
# Use this if you installed Prometheus from a Helm chart.
annotations:
enabled: true
# Additional tags to be added to Prometheus metrics
tags: {}
# Enable Prometheus ServiceMonitor
# Use this if you installed the Prometheus Operator (https://github.com/coreos/prometheus-operator).
serviceMonitor:
@ -62,8 +64,10 @@ server:
prometheus:
timerType: histogram
podAnnotations: {}
resources:
{}
podLabels: {}
secretLabels: {}
secretAnnotations: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
@ -79,6 +83,10 @@ server:
affinity: {}
additionalVolumes: []
additionalVolumeMounts: []
additionalEnv: []
securityContext:
fsGroup: 1000
runAsUser: 1000
config:
logLevel: "debug,info"
@ -86,6 +94,35 @@ server:
# IMPORTANT: This value cannot be changed, once it's set.
numHistoryShards: 512
# Define your TLS configuration here. See https://docs.temporal.io/references/configuration#tls
# for configuration options. You must also use `server.additionalVolumeMounts` and `server.additionalVolumes`
# to mount certificates (from Secret or ConfigMap etc) to the path you use below.
# tls:
# internode:
# server:
# certFile: /path/to/internode/cert/file
# keyFile: /path/to/internode/key/file
# requireClientAuth: true
# clientCaFiles:
# - /path/to/internode/serverCa
# client:
# serverName: dnsSanInInternodeCertificate
# rootCaFiles:
# - /path/to/internode/serverCa
# frontend:
# server:
# certFile: /path/to/frontend/cert/file
# keyFile: /path/to/frontend/key/file
# requireClientAuth: true
# clientCaFiles:
# - /path/to/internode/serverCa
# - /path/to/sdkClientPool1/ca
# - /path/to/sdkClientPool2/ca
# client:
# serverName: dnsSanInFrontendCertificate
# rootCaFiles:
# - /path/to/frontend/serverCa
persistence:
defaultStore: default
additionalStores: {}
@ -121,7 +158,7 @@ server:
maxConns: 20
maxConnLifetime: "1h"
# connectAttributes:
# tx_isolation: 'READ-COMMITTED'
# tx_isolation: 'READ-COMMITTED'
visibility:
driver: "cassandra"
@ -157,11 +194,11 @@ server:
# tx_isolation: 'READ-COMMITTED'
frontend:
# replicaCount: 1
service:
annotations: {} # Evaluated as template
type: ClusterIP
port: 7233
membershipPort: 6933
metrics:
annotations:
enabled: true
@ -170,16 +207,21 @@ server:
prometheus: {}
# timerType: histogram
podAnnotations: {}
podLabels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
additionalEnv: []
containerSecurityContext: {}
topologySpreadConstraints: {}
podDisruptionBudget: {}
history:
# replicaCount: 1
service:
# type: ClusterIP
port: 7234
membershipPort: 6934
metrics:
annotations:
enabled: true
@ -188,16 +230,21 @@ server:
prometheus: {}
# timerType: histogram
podAnnotations: {}
podLabels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
additionalEnv: []
containerSecurityContext: {}
topologySpreadConstraints: {}
podDisruptionBudget: {}
matching:
# replicaCount: 1
service:
# type: ClusterIP
port: 7235
membershipPort: 6935
metrics:
annotations:
enabled: false
@ -206,16 +253,21 @@ server:
prometheus: {}
# timerType: histogram
podAnnotations: {}
podLabels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
additionalEnv: []
containerSecurityContext: {}
topologySpreadConstraints: {}
podDisruptionBudget: {}
worker:
# replicaCount: 1
service:
# type: ClusterIP
port: 7239
membershipPort: 6939
metrics:
annotations:
enabled: true
@ -224,25 +276,37 @@ server:
prometheus: {}
# timerType: histogram
podAnnotations: {}
podLabels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
additionalEnv: []
containerSecurityContext: {}
topologySpreadConstraints: {}
podDisruptionBudget: {}
admintools:
enabled: true
image:
repository: temporalio/admin-tools
tag: 1.15.2
tag: 1.22.4
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 22
annotations: {}
podLabels: {}
podAnnotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
additionalEnv: []
resources: {}
containerSecurityContext: {}
securityContext: {}
podDisruptionBudget: {}
web:
enabled: true
@ -254,24 +318,24 @@ web:
default_to_namespace: # internal use only
issue_report_link: https://github.com/temporalio/web/issues/new/choose # set this field if you need to direct people to internal support forums
replicaCount: 1
image:
repository: temporalio/web
tag: 1.14.0
repository: temporalio/ui
tag: 2.16.2
pullPolicy: IfNotPresent
service:
# set type to NodePort if access to web needs access from outside the cluster
# for more info see https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
port: 8088
port: 8080
annotations: {}
# loadBalancerIP:
ingress:
enabled: false
# className:
annotations: {}
# kubernetes.io/ingress.class: traefik
# ingress.kubernetes.io/ssl-redirect: "false"
@ -286,6 +350,7 @@ web:
# - chart-example.local
podAnnotations: {}
podLabels: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
@ -305,6 +370,15 @@ web:
affinity: {}
additionalVolumes: []
additionalVolumeMounts: []
additionalEnv: []
containerSecurityContext: {}
securityContext: {}
schema:
setup:
enabled: true
@ -312,13 +386,16 @@ schema:
update:
enabled: true
backoffLimit: 100
resources: {}
containerSecurityContext: {}
securityContext: {}
elasticsearch:
enabled: true
replicas: 3
persistence:
enabled: false
imageTag: 7.16.2
imageTag: 7.17.3
host: elasticsearch-master-headless
scheme: http
port: 9200
@ -346,43 +423,49 @@ grafana:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
- name: "default"
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: TemporalMetrics
type: prometheus
url: http://{{ .Release.Name }}-prometheus-server
access: proxy
isDefault: true
datasources.yaml:
apiVersion: 1
datasources:
- name: TemporalMetrics
type: prometheus
url: http://{{ .Release.Name }}-prometheus-server
access: proxy
isDefault: true
dashboards:
default:
frontend-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/frontend.json
datasource: TemporalMetrics
temporal-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/temporal.json
datasource: TemporalMetrics
history-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/history.json
datasource: TemporalMetrics
matching-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/matching.json
datasource: TemporalMetrics
clusteroverview-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/10000.json
datasource: TemporalMetrics
common-github:
url: https://raw.githubusercontent.com/temporalio/temporal-dashboards/master/dashboards/common.json
datasource: TemporalMetrics
default:
server-general-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/server/server-general.json
datasource: TemporalMetrics
sdk-general-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/sdk/sdk-general.json
datasource: TemporalMetrics
misc-advanced-visibility-specific-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/advanced-visibility-specific.json
datasource: TemporalMetrics
misc-clustermonitoring-kubernetes-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/clustermonitoring-kubernetes.json
datasource: TemporalMetrics
misc-frontend-service-specific-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/frontend-service-specific.json
datasource: TemporalMetrics
misc-history-service-specific-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/history-service-specific.json
datasource: TemporalMetrics
misc-matching-service-specific-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/matching-service-specific.json
datasource: TemporalMetrics
misc-worker-service-specific-github:
url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/worker-service-specific.json
datasource: TemporalMetrics
cassandra:
enabled: true

View File

@ -0,0 +1,25 @@
server:
archival:
history:
state: "enabled"
enableRead: true
provider:
filestore:
fileMode: "0666"
dirMode: "0766"
visibility:
state: "enabled"
enableRead: true
provider:
filestore:
fileMode: "0666"
dirMode: "0766"
namespaceDefaults:
archival:
history:
state: "enabled"
URI: "file:///tmp/temporal_archival/development"
visibility:
state: "enabled"
URI: "file:///tmp/temporal_vis_archival/development"

View File

@ -0,0 +1,23 @@
server:
archival:
history:
state: "enabled"
enableRead: true
provider:
gstorage:
credentialsPath: "/tmp/keyfile.json"
visibility:
state: "enabled"
enableRead: true
provider:
gstorage:
credentialsPath: "/tmp/keyfile.json"
namespaceDefaults:
archival:
history:
state: "enabled"
URI: "gs://my-bucket-cad/temporal_archival/development"
visibility:
state: "enabled"
URI: "gs://my-bucket-cad/temporal_archival/visibility"

View File

@ -0,0 +1,23 @@
server:
archival:
history:
state: "enabled"
enableRead: true
provider:
s3store:
region: "us-east-1"
visibility:
state: "enabled"
enableRead: true
provider:
s3store:
region: "us-east-1"
namespaceDefaults:
archival:
history:
state: "enabled"
URI: "s3://archival-bucket-name"
visibility:
state: "enabled"
URI: "s3://visibility-archival-bucket-name"

View File

@ -8,6 +8,24 @@ server:
default:
driver: "cassandra"
# faultInjection:
# targets:
# dataStores:
# ExecutionStore:
# methods:
# GetCurrentExecution:
# errors:
# ResourceExhausted: 0.1
# AppendHistoryNodes:
# errors:
# ResourceExhausted: 0.05
# UpdateWorkflowExecution:
# errors:
# ResourceExhausted: 0.15
# GetWorkflowExecution:
# errors:
# ResourceExhausted: 0.15
cassandra:
hosts: ["cassandra.default.svc.cluster.local"]
port: 9042
@ -21,22 +39,6 @@ server:
consistency: "local_quorum"
serialConsistency: "local_serial"
visibility:
driver: "cassandra"
cassandra:
hosts: ["cassandra.default.svc.cluster.local"]
port: 9042
keyspace: temporal_visibility
user: "user"
password: "password"
existingSecret: ""
replicationFactor: 1
consistency:
default:
consistency: "local_quorum"
serialConsistency: "local_serial"
cassandra:
enabled: false

View File

@ -5,7 +5,7 @@ server:
driver: "sql"
sql:
driver: "mysql"
driver: "mysql8"
host: _HOST_
port: 3306
database: temporal
@ -18,7 +18,7 @@ server:
driver: "sql"
sql:
driver: "mysql"
driver: "mysql8"
host: _HOST_
port: 3306
database: temporal_visibility

View File

@ -5,27 +5,41 @@ server:
driver: "sql"
sql:
driver: "postgres"
driver: "postgres12"
host: _HOST_
port: 5432
database: temporal
user: _USERNAME_
password: _PASSWORD_
# for a production deployment use this instead of `password` and provision the secret beforehand e.g. with a sealed secret
# it has a single key called `password`
# existingSecret: temporal-default-store
maxConns: 20
maxConnLifetime: "1h"
# tls:
# enabled: true
# enableHostVerification: true
# serverName: _HOST_ # this is strictly required when using serverless CRDB offerings
visibility:
driver: "sql"
sql:
driver: "postgres"
driver: "postgres12"
host: _HOST_
port: 5432
database: temporal_visibility
user: _USERNAME_
password: _PASSWORD_
# for a production deployment use this instead of `password` and provision the secret beforehand e.g. with a sealed secret
# it has a single key called `password`
# existingSecret: temporal-visibility-store
maxConns: 20
maxConnLifetime: "1h"
# tls:
# enabled: true
# enableHostVerification: true
# serverName: _HOST_ # this is strictly required when using serverless CRDB offerings
cassandra:
enabled: false
@ -36,6 +50,15 @@ mysql:
postgresql:
enabled: true
prometheus:
enabled: true
grafana:
enabled: true
elasticsearch:
enabled: true
schema:
setup:
enabled: false

View File

@ -0,0 +1,28 @@
prometheus:
alertmanager:
enabled: false
alertmanagerFiles:
alertmanager.yml: {}
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
pushgateway:
enabled: false
server:
persistentVolume:
enabled: false
extraArgs:
# minimal possible values
storage.tsdb.retention: 6h
storage.tsdb.min-block-duration: 2h
storage.tsdb.max-block-duration: 2h
serverFiles:
alerts: {}
prometheus.yml:
remote_write:
- url: _URL_
basic_auth:
password: _PASSWORD_
username: _USERNAME_
rules: {}

View File

@ -13,6 +13,7 @@ spec:
protocol: HTTP
hosts:
- "*.pdev.resf.localhost"
- "*.pdev.resf.local"
tls:
httpsRedirect: true
- port:
@ -21,6 +22,7 @@ spec:
protocol: HTTPS
hosts:
- "*.pdev.resf.localhost"
- "*.pdev.resf.local"
tls:
mode: SIMPLE
credentialName: default-cert

View File

@ -40,7 +40,7 @@ import {
} from '../../../common/frontend_server/upstream.mjs';
export default async function run(webpackConfig) {
const devFrontendUrl = 'http://obsidian.pdot.localhost:16000';
const devFrontendUrl = 'https://id-dev.internal.pdev.resf.localhost';
const envPublicUrl = process.env['OBSIDIAN_FRONTEND_HTTP_PUBLIC_URL'];
const frontendUrl = process.env['RESF_NS'] ? envPublicUrl : devFrontendUrl;

View File

@ -48,6 +48,17 @@ import {
translateTaskTypeToText,
} from './ProjectTasks';
/**
 * Format a millisecond duration as a zero-padded "HH:MM:SS" string.
 *
 * @param {number} ms - duration in milliseconds (negative values clamp to 0,
 *   since createdAt/finishedAt clock skew can yield small negative diffs).
 * @returns {string} e.g. 3661000 -> "01:01:01"; hours are NOT capped at 24,
 *   so multi-day durations keep accumulating (e.g. "25:01:01").
 */
function formatDuration(ms) {
  const total = Math.max(0, ms);
  const seconds = Math.floor((total / 1000) % 60);
  const minutes = Math.floor((total / (1000 * 60)) % 60);
  // No `% 24` here: the original capped hours at a day, silently dropping
  // whole days for long-running tasks.
  const hours = Math.floor(total / (1000 * 60 * 60));
  return [hours, minutes, seconds]
    .map(val => (val < 10 ? `0${val}` : val)) // zero-pad to two digits
    .join(':');
}
export interface ProjectTasksSubtasksProps {
subtasks: V1Subtask[];
}
@ -79,7 +90,7 @@ export const ProjectTasksSubtasks = (props: ProjectTasksSubtasksProps) => {
(new Date(subtask.finishedAt) as any) -
(new Date(subtask.createdAt) as any);
subtaskDuration = (
<>{new Date(difference - 3600000).toLocaleTimeString()}</>
<>{formatDuration(difference)}</>
);
}

View File

@ -1,4 +1,4 @@
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
load("@io_bazel_rules_docker//nodejs:image.bzl", "nodejs_image")

View File

@ -1,5 +1,5 @@
FROM quay.io/authzed/spicedb:v1.2.0 as spicedb
FROM quay.io/rockylinux/rockylinux:8
FROM quay.io/authzed/spicedb:v1.28.0 AS spicedb
FROM quay.io/rockylinux/rockylinux:9
COPY --from=spicedb /usr/local/bin/spicedb /usr/local/bin/spicedb
COPY --from=spicedb /usr/local/bin/grpc_health_probe /usr/local/bin/grpc_health_probe

View File

@ -6,7 +6,7 @@ local DSN = db.dsn('hydra');
{
image: 'quay.io/peridot/spicedb',
tag: 'v0.3.21',
tag: 'v0.3.29',
legacyDb: true,
dsn: {
name: 'DSN',

View File

@ -3,6 +3,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def python_download():
    """Registers the rules_python repository (0.31.0).

    Release assets are named `rules_python-<version>.tar.gz`, so the filename
    in the URL must match the release tag — the previous revision pointed the
    0.31.0 release path at the 0.2.0 asset, which does not exist.
    """
    http_archive(
        name = "rules_python",
        url = "https://github.com/bazelbuild/rules_python/releases/download/0.31.0/rules_python-0.31.0.tar.gz",
        # NOTE(review): sha256 updated to the 0.31.0 tarball checksum — verify
        # against the published checksum on the GitHub release page.
        sha256 = "c68bdc4fbec25de5b5493b8819cfc877c4ea299c0dcb15c244c5a00208cde311",
    )