nameOverride: ""
fullnameOverride: ""

# Chart debug mode
# (e.g. disable the helm hook delete policy)
debug: false

# Custom service account management
serviceAccount:
  # Whether to create a service account or not
  create: false

  # Name of the service account, default: temporal.fullname
  name:

  # extraAnnotations lets users add additional annotations
  extraAnnotations:

server:
  enabled: true
  sidecarContainers: {}
  image:
    repository: temporalio/server
    tag: "1.20"
    pullPolicy: IfNotPresent

  # Global default settings (can be overridden per service)
  replicaCount: 1
  metrics:
    # Annotate pods directly with Prometheus annotations.
    # Use this if you installed Prometheus from a Helm chart.
    annotations:
      enabled: true
    # Additional tags to be added to Prometheus metrics
    tags: {}
    # Enable Prometheus ServiceMonitor
    # Use this if you installed the Prometheus Operator (https://github.com/coreos/prometheus-operator).
    serviceMonitor:
      enabled: false
      interval: 30s
      # Set additional labels on all the ServiceMonitor resources
      additionalLabels: {}
      #   label1: value1
      #   label2: value2
      # Set Prometheus metric_relabel_configs via ServiceMonitor
      # Use metricRelabelings to adjust metric and label names as needed
      metricRelabelings: []
      # - action: replace
      #   sourceLabels:
      #     - exported_namespace
      #   targetLabel: temporal_namespace
      # - action: replace
      #   regex: service_errors_(.+)
      #   replacement: ${1}
      #   sourceLabels:
      #     - __name__
      #   targetLabel: temporal_error_kind
      # - action: replace
      #   regex: service_errors_.+
      #   replacement: temporal_service_errors
      #   sourceLabels:
      #     - __name__
      #   targetLabel: __name__
    prometheus:
      timerType: histogram
  podAnnotations: {}
  podLabels: {}
  secretLabels: {}
  secretAnnotations: {}
  resources: {}
  # We usually recommend not specifying default resources and leaving this as a conscious
  # choice for the user. This also increases the chance that the chart runs on environments
  # with limited resources, such as Minikube. If you do want to specify resources, uncomment
  # the following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalVolumes: []
  additionalVolumeMounts: []
  additionalEnv: []
  securityContext:
    fsGroup: 1000
    runAsUser: 1000
  config:
    logLevel: "debug,info"

    # IMPORTANT: This value cannot be changed once it is set.
    numHistoryShards: 512

    # Define your TLS configuration here. See https://docs.temporal.io/references/configuration#tls
    # for configuration options. You must also use `server.additionalVolumeMounts` and
    # `server.additionalVolumes` to mount certificates (from a Secret, ConfigMap, etc.) to the
    # path you use below.
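    # For example (illustrative only; the Secret name and mount path are placeholders),
    # certificates kept in a Kubernetes Secret could be mounted via the
    # `server.additionalVolumes` / `server.additionalVolumeMounts` keys above and then
    # referenced from the `tls` block below:
    #
    # additionalVolumes:
    #   - name: internode-certs
    #     secret:
    #       secretName: temporal-internode-certs
    # additionalVolumeMounts:
    #   - name: internode-certs
    #     mountPath: /etc/temporal/certs/internode
    #     readOnly: true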
    # tls:
    #   internode:
    #     server:
    #       certFile: /path/to/internode/cert/file
    #       keyFile: /path/to/internode/key/file
    #       requireClientAuth: true
    #       clientCaFiles:
    #         - /path/to/internode/serverCa
    #     client:
    #       serverName: dnsSanInInternodeCertificate
    #       rootCaFiles:
    #         - /path/to/internode/serverCa
    #   frontend:
    #     server:
    #       certFile: /path/to/frontend/cert/file
    #       keyFile: /path/to/frontend/key/file
    #       requireClientAuth: true
    #       clientCaFiles:
    #         - /path/to/internode/serverCa
    #         - /path/to/sdkClientPool1/ca
    #         - /path/to/sdkClientPool2/ca
    #     client:
    #       serverName: dnsSanInFrontendCertificate
    #       rootCaFiles:
    #         - /path/to/frontend/serverCa

    persistence:
      defaultStore: default
      additionalStores: {}

      default:
        driver: "cassandra"

        cassandra:
          hosts: []
          # port: 9042
          keyspace: "temporal"
          user: "user"
          password: "password"
          existingSecret: ""
          replicationFactor: 1
          consistency:
            default:
              consistency: "local_quorum"
              serialConsistency: "local_serial"
          # datacenter: "us-east-1a"
          # maxQPS: 1000
          # maxConns: 2

        sql:
          driver: "mysql"
          host: "mysql"
          port: 3306
          database: "temporal"
          user: "temporal"
          password: "temporal"
          existingSecret: ""
          secretName: ""
          maxConns: 20
          maxConnLifetime: "1h"
          # connectAttributes:
          #   tx_isolation: 'READ-COMMITTED'

      visibility:
        driver: "cassandra"

        cassandra:
          hosts: []
          # port: 9042
          keyspace: "temporal_visibility"
          user: "user"
          password: "password"
          existingSecret: ""
          # datacenter: "us-east-1a"
          # maxQPS: 1000
          # maxConns: 2
          replicationFactor: 1
          consistency:
            default:
              consistency: "local_quorum"
              serialConsistency: "local_serial"

        sql:
          driver: "mysql"
          host: "mysql"
          port: 3306
          database: "temporal_visibility"
          user: "temporal"
          password: "temporal"
          existingSecret: ""
          secretName: ""
          maxConns: 20
          maxConnLifetime: "1h"
          # connectAttributes:
          #   tx_isolation: 'READ-COMMITTED'

  frontend:
    service:
      annotations: {}  # Evaluated as template
      type: ClusterIP
      port: 7233
      membershipPort: 6933
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      #   enabled: false
      prometheus: {}
      #   timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}

  history:
    service:
      # type: ClusterIP
      port: 7234
      membershipPort: 6934
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      #   enabled: false
      prometheus: {}
      #   timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}

  matching:
    service:
      # type: ClusterIP
      port: 7235
      membershipPort: 6935
    metrics:
      annotations:
        enabled: false
      serviceMonitor: {}
      #   enabled: false
      prometheus: {}
      #   timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}

  worker:
    service:
      # type: ClusterIP
      port: 7239
      membershipPort: 6939
    metrics:
      annotations:
        enabled: true
      serviceMonitor: {}
      #   enabled: false
      prometheus: {}
      #   timerType: histogram
    podAnnotations: {}
    podLabels: {}
    resources: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    additionalEnv: []
    containerSecurityContext: {}
    topologySpreadConstraints: {}
    podDisruptionBudget: {}
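  # The frontend, history, matching, and worker blocks above override the global server
  # defaults. As an illustration only (the values below are placeholders), per-service
  # resources could be requested like this:
  #
  # history:
  #   resources:
  #     requests:
  #       cpu: "1"
  #       memory: 1Gi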
admintools:
  enabled: true
  image:
    repository: temporalio/admin-tools
    tag: 1.22.4
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 22
    annotations: {}
  podLabels: {}
  podAnnotations: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalEnv: []
  resources: {}
  containerSecurityContext: {}
  securityContext: {}
  podDisruptionBudget: {}

web:
  enabled: true
  config:  # server/config.yml file content
    auth:
      enabled: false
    routing:
      default_to_namespace:  # internal use only
      issue_report_link: https://github.com/temporalio/web/issues/new/choose  # set this field if you need to direct people to internal support forums
  replicaCount: 1
  image:
    repository: temporalio/ui
    tag: 2.16.2
    pullPolicy: IfNotPresent
  service:
    # Set the type to NodePort if the Web UI needs to be reachable from outside the cluster.
    # For more info see https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
    type: ClusterIP
    port: 8080
    annotations: {}
    # loadBalancerIP:
  ingress:
    enabled: false
    # className:
    annotations: {}
    # kubernetes.io/ingress.class: traefik
    # ingress.kubernetes.io/ssl-redirect: "false"
    # traefik.frontend.rule.type: PathPrefix
    hosts:
      - "/"
      # - "domain.com/xyz"
      # - "domain.com"
    tls: []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local
  podAnnotations: {}
  podLabels: {}
  resources: {}
  # We usually recommend not specifying default resources and leaving this as a conscious
  # choice for the user. This also increases the chance that the chart runs on environments
  # with limited resources, such as Minikube. If you do want to specify resources, uncomment
  # the following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  additionalVolumes: []
  additionalVolumeMounts: []
  additionalEnv: []
  containerSecurityContext: {}
  securityContext: {}

schema:
  setup:
    enabled: true
    backoffLimit: 100
  update:
    enabled: true
    backoffLimit: 100
  resources: {}
  containerSecurityContext: {}
  securityContext: {}

elasticsearch:
  enabled: true
  replicas: 3
  persistence:
    enabled: false
  imageTag: 7.17.3
  host: elasticsearch-master-headless
  scheme: http
  port: 9200
  version: "v7"
  logLevel: "error"
  username: ""
  password: ""
  visibilityIndex: "temporal_visibility_v1_dev"

prometheus:
  enabled: true
  nodeExporter:
    enabled: false

grafana:
  enabled: true
  replicas: 1
  testFramework:
    enabled: false
  rbac:
    create: false
    pspEnabled: false
    namespaced: true
  dashboardProviders:
    dashboardproviders.yaml:
      apiVersion: 1
      providers:
        - name: "default"
          orgId: 1
          folder: ""
          type: file
          disableDeletion: false
          editable: true
          options:
            path: /var/lib/grafana/dashboards/default
  datasources:
    datasources.yaml:
      apiVersion: 1
      datasources:
        - name: TemporalMetrics
          type: prometheus
          url: http://{{ .Release.Name }}-prometheus-server
          access: proxy
          isDefault: true
  dashboards:
    default:
      server-general-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/server/server-general.json
        datasource: TemporalMetrics
      sdk-general-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/sdk/sdk-general.json
        datasource: TemporalMetrics
      misc-advanced-visibility-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/advanced-visibility-specific.json
        datasource: TemporalMetrics
      misc-clustermonitoring-kubernetes-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/clustermonitoring-kubernetes.json
        datasource: TemporalMetrics
      misc-frontend-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/frontend-service-specific.json
        datasource: TemporalMetrics
      misc-history-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/history-service-specific.json
        datasource: TemporalMetrics
      misc-matching-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/matching-service-specific.json
        datasource: TemporalMetrics
      misc-worker-service-specific-github:
        url: https://raw.githubusercontent.com/temporalio/dashboards/helm/misc/worker-service-specific.json
        datasource: TemporalMetrics

cassandra:
  enabled: true
  persistence:
    enabled: false
  image:
    repo: cassandra
    tag: 3.11.3
    pullPolicy: IfNotPresent
  config:
    cluster_size: 3
    ports:
      cql: 9042
    num_tokens: 4
    max_heap_size: 512M
    heap_new_size: 128M
    seed_size: 0
  service:
    type: ClusterIP

mysql:
  enabled: false
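# To run against an existing database instead of the bundled stores, disable them here and
# point `server.config.persistence` (further up in this file) at your own instance. A minimal
# sketch, assuming a reachable MySQL host and a pre-created Secret holding the password
# (the host and Secret names below are placeholders):
#
# cassandra:
#   enabled: false
# mysql:
#   enabled: false
# server:
#   config:
#     persistence:
#       default:
#         driver: "sql"
#         sql:
#           driver: "mysql"
#           host: "mysql.example.internal"
#           port: 3306
#           database: "temporal"
#           user: "temporal"
#           existingSecret: "temporal-default-store"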