# peridot/infrastructure/dev-helm/temporal/values.yaml

nameOverride: ""
fullnameOverride: ""
# Chart debug mode
# (eg. disable helm hook delete policy)
debug: false
# Custom Service account management
serviceAccount:
  # Whether to create service account or not
  create: false
  # Name of the service account, default: temporal.fullname
  name:
  # extraAnnotations would let users add additional annotations
  extraAnnotations:
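  # Example (illustrative only): create a dedicated service account and attach
  # annotations, e.g. an AWS IAM role for IRSA. Name and role ARN below are placeholders.
  # create: true
  # name: temporal-server
  # extraAnnotations:
  #   eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/temporal-server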
server:
  enabled: true
  sidecarContainers: {}
  image:
    repository: temporalio/server
    tag: "1.20"
    pullPolicy: IfNotPresent
  # Global default settings (can be overridden per service)
  replicaCount: 1
  metrics:
    # Annotate pods directly with Prometheus annotations.
    # Use this if you installed Prometheus from a Helm chart.
    annotations:
      enabled: true
    # Additional tags to be added to Prometheus metrics
    tags: {}
    # Enable Prometheus ServiceMonitor
    # Use this if you installed the Prometheus Operator (https://github.com/coreos/prometheus-operator).
    serviceMonitor:
      enabled: false
      interval: 30s
      # Set additional labels to all the ServiceMonitor resources
      additionalLabels: {}
      #  label1: value1
      #  label2: value2
      # Set Prometheus metric_relabel_configs via ServiceMonitor
      # Use metricRelabelings to adjust metric and label names as needed
      metricRelabelings: []
      # - action: replace
      #   sourceLabels:
      #   - exported_namespace
      #   targetLabel: temporal_namespace
      # - action: replace
      #   regex: service_errors_(.+)
      #   replacement: ${1}
      #   sourceLabels:
      #   - __name__
      #   targetLabel: temporal_error_kind
      # - action: replace
      #   regex: service_errors_.+
      #   replacement: temporal_service_errors
      #   sourceLabels:
      #   - __name__
      #   targetLabel: __name__
    prometheus:
      timerType: histogram
  podAnnotations: {}
  podLabels: {}
  secretLabels: {}
  secretAnnotations: {}
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalVolumes: []
  additionalVolumeMounts: []
  additionalEnv: []
  securityContext:
    fsGroup: 1000
    runAsUser: 1000
  config:
    logLevel: "debug,info"
    # IMPORTANT: This value cannot be changed once it is set.
    numHistoryShards: 512
    # Define your TLS configuration here. See https://docs.temporal.io/references/configuration#tls
    # for configuration options. You must also use `server.additionalVolumeMounts` and `server.additionalVolumes`
    # to mount certificates (from Secret or ConfigMap etc) to the path you use below.
    # tls:
    #   internode:
    #     server:
    #       certFile: /path/to/internode/cert/file
    #       keyFile: /path/to/internode/key/file
    #       requireClientAuth: true
    #       clientCaFiles:
    #         - /path/to/internode/serverCa
    #     client:
    #       serverName: dnsSanInInternodeCertificate
    #       rootCaFiles:
    #         - /path/to/internode/serverCa
    #   frontend:
    #     server:
    #       certFile: /path/to/frontend/cert/file
    #       keyFile: /path/to/frontend/key/file
    #       requireClientAuth: true
    #       clientCaFiles:
    #         - /path/to/internode/serverCa
    #         - /path/to/sdkClientPool1/ca
    #         - /path/to/sdkClientPool2/ca
    #     client:
    #       serverName: dnsSanInFrontendCertificate
    #       rootCaFiles:
    #         - /path/to/frontend/serverCa
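    # Example (illustrative): the certificate paths above are not created by the chart;
    # mount them yourself via the `server.additionalVolumes` / `server.additionalVolumeMounts`
    # keys defined earlier in this file. The Secret name below is a placeholder.
    # additionalVolumes:
    #   - name: internode-certs
    #     secret:
    #       secretName: temporal-internode-tls
    # additionalVolumeMounts:
    #   - name: internode-certs
    #     mountPath: /path/to/internode
    #     readOnly: true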
    persistence:
      defaultStore: default
      additionalStores: {}
      default:
        driver: "cassandra"
        cassandra:
          hosts: []
          # port: 9042
          keyspace: "temporal"
          user: "user"
          password: "password"
          existingSecret: ""
          replicationFactor: 1
          consistency:
            default:
              consistency: "local_quorum"
              serialConsistency: "local_serial"
          # datacenter: "us-east-1a"
          # maxQPS: 1000
          # maxConns: 2
        sql:
          driver: "mysql"
          host: "mysql"
          port: 3306
          database: "temporal"
          user: "temporal"
          password: "temporal"
          existingSecret: ""
          secretName: ""
          maxConns: 20
          maxConnLifetime: "1h"
          # connectAttributes:
          #   tx_isolation: 'READ-COMMITTED'
      visibility:
        driver: "cassandra"
        cassandra:
          hosts: []
          # port: 9042
          keyspace: "temporal_visibility"
          user: "user"
          password: "password"
          existingSecret: ""
          # datacenter: "us-east-1a"
          # maxQPS: 1000
          # maxConns: 2
          replicationFactor: 1
          consistency:
            default:
              consistency: "local_quorum"
              serialConsistency: "local_serial"
        sql:
          driver: "mysql"
          host: "mysql"
          port: 3306
          database: "temporal_visibility"
          user: "temporal"
          password: "temporal"
          existingSecret: ""
          secretName: ""
          maxConns: 20
          maxConnLifetime: "1h"
          # connectAttributes:
          #   tx_isolation: 'READ-COMMITTED'
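      # Example (illustrative): to use the SQL store defined above instead of the
      # bundled Cassandra, switch the driver and read the password from an existing
      # Kubernetes Secret rather than keeping it in plain text. The host and Secret
      # name below are placeholders.
      # default:
      #   driver: "sql"
      #   sql:
      #     driver: "mysql"
      #     host: "mysql.example.internal"
      #     existingSecret: "temporal-default-store"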
  frontend:
    service:
      annotations: {} # Evaluated as template
      type: ClusterIP
      port: 7233
      membershipPort: 6933
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      # enabled: false
      prometheus: {}
      # timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}
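    # Example (illustrative): allow at most one frontend pod to be unavailable
    # during voluntary disruptions.
    # podDisruptionBudget:
    #   maxUnavailable: 1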
  history:
    service:
      # type: ClusterIP
      port: 7234
      membershipPort: 6934
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      # enabled: false
      prometheus: {}
      # timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}
  matching:
    service:
      # type: ClusterIP
      port: 7235
      membershipPort: 6935
    metrics:
      annotations:
        enabled: false
      serviceMonitor: {}
      # enabled: false
      prometheus: {}
      # timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}
  worker:
    service:
      # type: ClusterIP
      port: 7239
      membershipPort: 6939
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      # enabled: false
      prometheus: {}
      # timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}
admintools:
  enabled: true
  image:
    repository: temporalio/admin-tools
    tag: 1.22.4
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 22
    annotations: {}
  podLabels: {}
  podAnnotations: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalEnv: []
  resources: {}
  containerSecurityContext: {}
  securityContext: {}
  podDisruptionBudget: {}
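  # Example (illustrative): the admin-tools image bundles tctl for schema and
  # namespace operations; the deployment name below assumes a release named "temporal":
  #   kubectl exec -it deploy/temporal-admintools -- tctl namespace list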
web:
  enabled: true
  config:
    # server/config.yml file content
    auth:
      enabled: false
    routing:
      default_to_namespace: # internal use only
      issue_report_link: https://github.com/temporalio/web/issues/new/choose # set this field if you need to direct people to internal support forums
  replicaCount: 1
  image:
    repository: temporalio/ui
    tag: 2.16.2
    pullPolicy: IfNotPresent
  service:
    # set type to NodePort if the web UI needs to be reachable from outside the cluster
    # for more info see https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
    type: ClusterIP
    port: 8080
    annotations: {}
    # loadBalancerIP:
  ingress:
    enabled: false
    # className:
    annotations: {}
      # kubernetes.io/ingress.class: traefik
      # ingress.kubernetes.io/ssl-redirect: "false"
      # traefik.frontend.rule.type: PathPrefix
    hosts:
      - "/"
      # - "domain.com/xyz"
      # - "domain.com"
    tls: []
    #  - secretName: chart-example-tls
    #    hosts:
    #      - chart-example.local
  podAnnotations: {}
  podLabels: {}
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalVolumes: []
  additionalVolumeMounts: []
  additionalEnv: []
  containerSecurityContext: {}
  securityContext: {}
schema:
  setup:
    enabled: true
    backoffLimit: 100
  update:
    enabled: true
    backoffLimit: 100
  resources: {}
  containerSecurityContext: {}
  securityContext: {}
elasticsearch:
  enabled: true
  replicas: 3
  persistence:
    enabled: false
  imageTag: 7.17.3
  host: elasticsearch-master-headless
  scheme: http
  port: 9200
  version: "v7"
  logLevel: "error"
  username: ""
  password: ""
  visibilityIndex: "temporal_visibility_v1_dev"
prometheus:
  enabled: true
  nodeExporter:
    enabled: false
grafana:
  enabled: true
  replicas: 1
  testFramework:
    enabled: false
  rbac:
    create: false
    pspEnabled: false
    namespaced: true
  dashboardProviders:
    dashboardproviders.yaml:
      apiVersion: 1
      providers:
        - name: "default"
          orgId: 1
          folder: ""
          type: file
          disableDeletion: false
          editable: true
          options:
            path: /var/lib/grafana/dashboards/default
  datasources:
    datasources.yaml:
      apiVersion: 1
      datasources:
        - name: TemporalMetrics
          type: prometheus
          url: http://{{ .Release.Name }}-prometheus-server
          access: proxy
          isDefault: true
  dashboards:
    default:
      server-general-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/server/server-general.json
        datasource: TemporalMetrics
      sdk-general-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/sdk/sdk-general.json
        datasource: TemporalMetrics
      misc-advanced-visibility-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/advanced-visibility-specific.json
        datasource: TemporalMetrics
      misc-clustermonitoring-kubernetes-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/clustermonitoring-kubernetes.json
        datasource: TemporalMetrics
      misc-frontend-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/frontend-service-specific.json
        datasource: TemporalMetrics
      misc-history-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/history-service-specific.json
        datasource: TemporalMetrics
      misc-matching-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/matching-service-specific.json
        datasource: TemporalMetrics
      misc-worker-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/worker-service-specific.json
        datasource: TemporalMetrics
cassandra:
  enabled: true
  persistence:
    enabled: false
  image:
    repo: cassandra
    tag: 3.11.3
    pullPolicy: IfNotPresent
  config:
    cluster_size: 3
    ports:
      cql: 9042
    num_tokens: 4
    max_heap_size: 512M
    heap_new_size: 128M
    seed_size: 0
  service:
    type: ClusterIP

mysql:
  enabled: false