From 68a87b274305c70871723f7a1215b98e2c54af19 Mon Sep 17 00:00:00 2001 From: Alexander Date: Mon, 22 Jan 2024 09:25:58 +0100 Subject: [PATCH] Video replay pipeline split (#1848) * feat(backend): split * feat(docker): added ffmpeg to canvas-maker service --- backend/Dockerfile | 2 + backend/cmd/canvas-handler/main.go | 107 ++++++++++++++ backend/cmd/canvas-maker/main.go | 95 +++++++++++++ backend/cmd/imagestorage/main.go | 75 ++-------- backend/cmd/videostorage/main.go | 69 ++------- .../charts/canvas-handler/.helmignore | 23 +++ .../charts/canvas-handler/Chart.yaml | 24 ++++ .../charts/canvas-handler/templates/NOTES.txt | 22 +++ .../canvas-handler/templates/_helpers.tpl | 62 +++++++++ .../canvas-handler/templates/deployment.yaml | 131 ++++++++++++++++++ .../charts/canvas-handler/templates/hpa.yaml | 29 ++++ .../canvas-handler/templates/ingress.yaml | 62 +++++++++ .../canvas-handler/templates/service.yaml | 18 +++ .../templates/serviceMonitor.yaml | 18 +++ .../templates/serviceaccount.yaml | 13 ++ .../templates/tests/test-connection.yaml | 15 ++ .../charts/canvas-handler/values.yaml | 124 +++++++++++++++++ .../charts/canvas-maker/.helmignore | 23 +++ .../openreplay/charts/canvas-maker/Chart.yaml | 24 ++++ .../charts/canvas-maker/templates/NOTES.txt | 22 +++ .../canvas-maker/templates/_helpers.tpl | 62 +++++++++ .../canvas-maker/templates/deployment.yaml | 131 ++++++++++++++++++ .../charts/canvas-maker/templates/hpa.yaml | 29 ++++ .../canvas-maker/templates/ingress.yaml | 62 +++++++++ .../canvas-maker/templates/service.yaml | 18 +++ .../templates/serviceMonitor.yaml | 18 +++ .../templates/serviceaccount.yaml | 13 ++ .../templates/tests/test-connection.yaml | 15 ++ .../charts/canvas-maker/values.yaml | 124 +++++++++++++++++ 29 files changed, 1305 insertions(+), 125 deletions(-) create mode 100644 backend/cmd/canvas-handler/main.go create mode 100644 backend/cmd/canvas-maker/main.go create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/.helmignore create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/Chart.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/NOTES.txt create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/_helpers.tpl create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/deployment.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/hpa.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/ingress.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/service.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceMonitor.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceaccount.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/templates/tests/test-connection.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-handler/values.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/.helmignore create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/Chart.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/NOTES.txt create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/_helpers.tpl create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/deployment.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/hpa.yaml 
create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/ingress.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/service.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceMonitor.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceaccount.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/templates/tests/test-connection.yaml create mode 100644 scripts/helmcharts/openreplay/charts/canvas-maker/values.yaml diff --git a/backend/Dockerfile b/backend/Dockerfile index d14919f913..8421e4b80e 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -122,6 +122,8 @@ RUN if [ "$SERVICE_NAME" = "http" ]; then \ wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \ elif [ "$SERVICE_NAME" = "videostorage" ]; then \ apk add --no-cache ffmpeg; \ + elif [ "$SERVICE_NAME" = "canvas-maker" ]; then \ + apk add --no-cache ffmpeg; \ fi diff --git a/backend/cmd/canvas-handler/main.go b/backend/cmd/canvas-handler/main.go new file mode 100644 index 0000000000..6db3d8d030 --- /dev/null +++ b/backend/cmd/canvas-handler/main.go @@ -0,0 +1,107 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + config "openreplay/backend/internal/config/imagestorage" + "openreplay/backend/internal/imagestorage" + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/metrics" + storageMetrics "openreplay/backend/pkg/metrics/imagestorage" + "openreplay/backend/pkg/queue" +) + +func main() { + m := metrics.New() + m.Register(storageMetrics.List()) + + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) + + cfg := config.New() + + srv, err := imagestorage.New(cfg) + if err != nil { + log.Printf("can't init storage service: %s", err) + return + } + + producer := queue.NewProducer(cfg.MessageSizeLimit, true) + + canvasConsumer := queue.NewConsumer( + cfg.GroupImageStorage, + []string{ + cfg.TopicCanvasImages, + }, + messages.NewImagesMessageIterator(func(data []byte, sessID uint64) { + checkSessionEnd := func(data []byte) (messages.Message, error) { + reader := messages.NewBytesReader(data) + msgType, err := reader.ReadUint() + if err != nil { + return nil, err + } + if msgType != messages.MsgSessionEnd { + return nil, fmt.Errorf("not a session end message") + } + msg, err := messages.ReadMessage(msgType, reader) + if err != nil { + return nil, fmt.Errorf("read message err: %s", err) + } + return msg, nil + } + + if msg, err := checkSessionEnd(data); err == nil { + sessEnd := msg.(*messages.SessionEnd) + // Received session end + if list, err := srv.PrepareCanvas(sessID); err != nil { + log.Printf("can't prepare canvas: %s", err) + } else { + for _, name := range list { + sessEnd.EncryptionKey = name + if err := producer.Produce(cfg.TopicCanvasTrigger, sessID, sessEnd.Encode()); err != nil { + log.Printf("can't send session end signal to video service: %s", err) + } + } + } + } else { + if err := srv.ProcessCanvas(sessID, data); err != nil { + log.Printf("can't process canvas image: %s", err) + } + } + }, nil, true), + false, + cfg.MessageSizeLimit, + ) + + log.Printf("Canvas handler service started\n") + + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + + counterTick := time.Tick(time.Second * 30) + for { + select { + case sig := <-sigchan: + log.Printf("Caught signal %v: terminating\n", sig) + srv.Wait() + canvasConsumer.Close() + os.Exit(0) + case 
<-counterTick: + srv.Wait() + if err := canvasConsumer.Commit(); err != nil { + log.Printf("can't commit messages: %s", err) + } + case msg := <-canvasConsumer.Rebalanced(): + log.Println(msg) + default: + err = canvasConsumer.ConsumeNext() + if err != nil { + log.Fatalf("Error on images consumption: %v", err) + } + } + } +} diff --git a/backend/cmd/canvas-maker/main.go b/backend/cmd/canvas-maker/main.go new file mode 100644 index 0000000000..c1b442cb63 --- /dev/null +++ b/backend/cmd/canvas-maker/main.go @@ -0,0 +1,95 @@ +package main + +import ( + "log" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + config "openreplay/backend/internal/config/videostorage" + "openreplay/backend/internal/videostorage" + "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/metrics" + storageMetrics "openreplay/backend/pkg/metrics/videostorage" + "openreplay/backend/pkg/objectstorage/store" + "openreplay/backend/pkg/queue" +) + +func main() { + m := metrics.New() + m.Register(storageMetrics.List()) + + log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) + + cfg := config.New() + + objStore, err := store.NewStore(&cfg.ObjectsConfig) + if err != nil { + log.Fatalf("can't init object storage: %s", err) + } + srv, err := videostorage.New(cfg, objStore) + if err != nil { + log.Printf("can't init storage service: %s", err) + return + } + + workDir := cfg.FSDir + + canvasConsumer := queue.NewConsumer( + cfg.GroupVideoStorage, + []string{ + cfg.TopicCanvasTrigger, + }, + messages.NewMessageIterator( + func(msg messages.Message) { + sesEnd := msg.(*messages.SessionEnd) + filePath := workDir + "/canvas/" + strconv.FormatUint(sesEnd.SessionID(), 10) + "/" + canvasMix := sesEnd.EncryptionKey // dirty hack to use encryption key as canvas mix holder (only between canvas handler and canvas maker) + if canvasMix == "" { + log.Printf("no canvas mix for session: %d", sesEnd.SessionID()) + return + } + if err := srv.Process(sesEnd.SessionID(), filePath, canvasMix); err != nil { + if !strings.Contains(err.Error(), "no such file or directory") { + log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID()) + } + } + }, + []int{messages.MsgSessionEnd}, + true, + ), + false, + cfg.MessageSizeLimit, + ) + + log.Printf("Canvas maker service started\n") + + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + + counterTick := time.Tick(time.Second * 30) + for { + select { + case sig := <-sigchan: + log.Printf("Caught signal %v: terminating\n", sig) + srv.Wait() + canvasConsumer.Close() + os.Exit(0) + case <-counterTick: + srv.Wait() + if err := canvasConsumer.Commit(); err != nil { + log.Printf("can't commit messages: %s", err) + } + case msg := <-canvasConsumer.Rebalanced(): + log.Println(msg) + default: + err = canvasConsumer.ConsumeNext() + if err != nil { + log.Fatalf("Error on end event consumption: %v", err) + } + } + } +} diff --git a/backend/cmd/imagestorage/main.go b/backend/cmd/imagestorage/main.go index 5265fe862e..338463f33f 100644 --- a/backend/cmd/imagestorage/main.go +++ b/backend/cmd/imagestorage/main.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "log" "os" "os/signal" @@ -30,61 +29,14 @@ func main() { return } - producer := queue.NewProducer(cfg.MessageSizeLimit, true) - - //consumer := queue.NewConsumer( - // cfg.GroupImageStorage, - // []string{ - // cfg.TopicRawImages, - // }, - // messages.NewImagesMessageIterator(func(data []byte, sessID uint64) { - // if err := srv.Process(sessID, data); err != nil { - // log.Printf("can't 
process image: %s", err) - // } - // }, nil, true), - // false, - // cfg.MessageSizeLimit, - //) - - canvasConsumer := queue.NewConsumer( + consumer := queue.NewConsumer( cfg.GroupImageStorage, []string{ - cfg.TopicCanvasImages, + cfg.TopicRawImages, }, messages.NewImagesMessageIterator(func(data []byte, sessID uint64) { - checkSessionEnd := func(data []byte) (messages.Message, error) { - reader := messages.NewBytesReader(data) - msgType, err := reader.ReadUint() - if err != nil { - return nil, err - } - if msgType != messages.MsgSessionEnd { - return nil, fmt.Errorf("not a session end message") - } - msg, err := messages.ReadMessage(msgType, reader) - if err != nil { - return nil, fmt.Errorf("read message err: %s", err) - } - return msg, nil - } - - if msg, err := checkSessionEnd(data); err == nil { - sessEnd := msg.(*messages.SessionEnd) - // Received session end - if list, err := srv.PrepareCanvas(sessID); err != nil { - log.Printf("can't prepare canvas: %s", err) - } else { - for _, name := range list { - sessEnd.EncryptionKey = name - if err := producer.Produce(cfg.TopicCanvasTrigger, sessID, sessEnd.Encode()); err != nil { - log.Printf("can't send session end signal to video service: %s", err) - } - } - } - } else { - if err := srv.ProcessCanvas(sessID, data); err != nil { - log.Printf("can't process canvas image: %s", err) - } + if err := srv.Process(sessID, data); err != nil { + log.Printf("can't process image: %s", err) } }, nil, true), false, @@ -102,28 +54,17 @@ func main() { case sig := <-sigchan: log.Printf("Caught signal %v: terminating\n", sig) srv.Wait() - // close all consumers - //consumer.Close() - canvasConsumer.Close() + consumer.Close() os.Exit(0) case <-counterTick: srv.Wait() - //if err := consumer.Commit(); err != nil { - // log.Printf("can't commit messages: %s", err) - //} - if err := canvasConsumer.Commit(); err != nil { + if err := consumer.Commit(); err != nil { log.Printf("can't commit messages: %s", err) } - //case msg := <-consumer.Rebalanced(): - // log.Println(msg) - case msg := <-canvasConsumer.Rebalanced(): + case msg := <-consumer.Rebalanced(): log.Println(msg) default: - //err := consumer.ConsumeNext() - //if err != nil { - // log.Fatalf("Error on images consumption: %v", err) - //} - err = canvasConsumer.ConsumeNext() + err := consumer.ConsumeNext() if err != nil { log.Fatalf("Error on images consumption: %v", err) } diff --git a/backend/cmd/videostorage/main.go b/backend/cmd/videostorage/main.go index 60bda6dcba..948a1ca11b 100644 --- a/backend/cmd/videostorage/main.go +++ b/backend/cmd/videostorage/main.go @@ -5,7 +5,6 @@ import ( "os" "os/signal" "strconv" - "strings" "syscall" "time" @@ -38,53 +37,20 @@ func main() { workDir := cfg.FSDir - //consumer := queue.NewConsumer( - // cfg.GroupVideoStorage, - // []string{ - // cfg.TopicMobileTrigger, - // }, - // messages.NewMessageIterator( - // func(msg messages.Message) { - // sesEnd := msg.(*messages.IOSSessionEnd) - // log.Printf("skipped mobile session end: %d", sesEnd.SessionID()) - // //log.Printf("recieved mobile session end: %d", sesEnd.SessionID()) - // //if err := srv.Process(sesEnd.SessionID(), workDir+"/screenshots/"+strconv.FormatUint(sesEnd.SessionID(), 10)+"/", false); err != nil { - // // log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID()) - // //} - // }, - // []int{messages.MsgIOSSessionEnd}, - // true, - // ), - // false, - // cfg.MessageSizeLimit, - //) - - // Debug: global counter for canvases - canvasCount := 0 - - canvasConsumer := queue.NewConsumer( + consumer 
:= queue.NewConsumer( cfg.GroupVideoStorage, []string{ - cfg.TopicCanvasTrigger, + cfg.TopicMobileTrigger, }, messages.NewMessageIterator( func(msg messages.Message) { - sesEnd := msg.(*messages.SessionEnd) - filePath := workDir + "/canvas/" + strconv.FormatUint(sesEnd.SessionID(), 10) + "/" - canvasMix := sesEnd.EncryptionKey // dirty hack to use encryption key as canvas mix holder (only between canvas handler and canvas maker) - if canvasMix == "" { - log.Printf("no canvas mix for session: %d", sesEnd.SessionID()) - return - } - if err := srv.Process(sesEnd.SessionID(), filePath, canvasMix); err != nil { - if !strings.Contains(err.Error(), "no such file or directory") { - log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID()) - } - } else { - canvasCount++ + sesEnd := msg.(*messages.IOSSessionEnd) + log.Printf("recieved mobile session end: %d", sesEnd.SessionID()) + if err := srv.Process(sesEnd.SessionID(), workDir+"/screenshots/"+strconv.FormatUint(sesEnd.SessionID(), 10)+"/", ""); err != nil { + log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID()) } }, - []int{messages.MsgSessionEnd}, + []int{messages.MsgIOSSessionEnd}, true, ), false, @@ -102,30 +68,17 @@ func main() { case sig := <-sigchan: log.Printf("Caught signal %v: terminating\n", sig) srv.Wait() - //consumer.Close() - canvasConsumer.Close() + consumer.Close() os.Exit(0) case <-counterTick: srv.Wait() - //if err := consumer.Commit(); err != nil { - // log.Printf("can't commit messages: %s", err) - //} - if err := canvasConsumer.Commit(); err != nil { + if err := consumer.Commit(); err != nil { log.Printf("can't commit messages: %s", err) } - // Debug log - log.Printf("canvasCount: %d", canvasCount) - canvasCount = 0 - //case msg := <-consumer.Rebalanced(): - // log.Println(msg) - case msg := <-canvasConsumer.Rebalanced(): + case msg := <-consumer.Rebalanced(): log.Println(msg) default: - //err = consumer.ConsumeNext() - //if err != nil { - // log.Fatalf("Error on end event consumption: %v", err) - //} - err = canvasConsumer.ConsumeNext() + err = consumer.ConsumeNext() if err != nil { log.Fatalf("Error on end event consumption: %v", err) } diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/.helmignore b/scripts/helmcharts/openreplay/charts/canvas-handler/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/Chart.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/Chart.yaml new file mode 100644 index 0000000000..d208bfaa13 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: canvas-handler +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +AppVersion: "v1.16.0" diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/NOTES.txt new file mode 100644 index 0000000000..96b287832b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "canvas-handler.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "canvas-handler.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "canvas-handler.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "canvas-handler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/_helpers.tpl new file mode 100644 index 0000000000..59bc66bf27 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "canvas-handler.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "canvas-handler.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "canvas-handler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "canvas-handler.labels" -}} +helm.sh/chart: {{ include "canvas-handler.chart" . }} +{{ include "canvas-handler.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "canvas-handler.selectorLabels" -}} +app.kubernetes.io/name: {{ include "canvas-handler.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "canvas-handler.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "canvas-handler.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/deployment.yaml new file mode 100644 index 0000000000..f268a081ce --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/deployment.yaml @@ -0,0 +1,131 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "canvas-handler.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "canvas-handler.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "canvas-handler.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "canvas-handler.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + shareProcessNamespace: true + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ tpl .Values.image.repository . 
}}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.healthCheck}} + {{- .Values.healthCheck | toYaml | nindent 10}} + {{- end}} + env: + - name: AWS_ACCESS_KEY_ID + {{- if .Values.global.s3.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.global.s3.existingSecret }} + key: access-key + {{- else }} + value: {{ .Values.global.s3.accessKey }} + {{- end }} + - name: AWS_SECRET_ACCESS_KEY + {{- if .Values.global.s3.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.global.s3.existingSecret }} + key: secret-key + {{- else }} + value: {{ .Values.global.s3.secretKey }} + {{- end }} + - name: AWS_ENDPOINT + value: '{{ .Values.global.s3.endpoint }}' + - name: AWS_REGION + value: '{{ .Values.global.s3.region }}' + - name: BUCKET_NAME + value: {{ .Values.global.s3.recordingsBucket }} + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + {{- range $key, $val := .Values.service.ports }} + - name: {{ $key }} + containerPort: {{ $val }} + protocol: TCP + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }} + {{- with .Values.persistence.mounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if eq (tpl .Values.pvc.name . ) "hostPath" }} + volumes: + {{- with .Values.persistence.volumes }} + {{- toYaml . | nindent 6 }} + {{- end }} + - name: datadir + hostPath: + # Ensure the file directory is created. + path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + {{- with .Values.persistence.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + - name: datadir + persistentVolumeClaim: + claimName: "{{ tpl .Values.pvc.name . }}" + {{- end }} + {{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/hpa.yaml new file mode 100644 index 0000000000..6ead7769ee --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/hpa.yaml @@ -0,0 +1,29 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "canvas-handler.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "canvas-handler.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/ingress.yaml new file mode 100644 index 0000000000..b4956e41d1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/ingress.yaml @@ -0,0 +1,62 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "canvas-handler.fullname" . -}} +{{- $svcPort := .Values.service.ports.http -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/service.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/service.yaml new file mode 100644 index 0000000000..6153fce307 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "canvas-handler.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + {{- range $key, $val := .Values.service.ports }} + - port: {{ $val }} + targetPort: {{ $key }} + protocol: TCP + name: {{ $key }} + {{- end}} + selector: + {{- include "canvas-handler.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceMonitor.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceMonitor.yaml new file mode 100644 index 0000000000..a1d73413c8 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceMonitor.yaml @@ -0,0 +1,18 @@ +{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "canvas-handler.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.additionalLabels }} + {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + {{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }} + selector: + matchLabels: + {{- include "canvas-handler.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceaccount.yaml new file mode 100644 index 0000000000..ce641f58d0 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "canvas-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..66eab47076 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "canvas-handler.fullname" . }}-test-connection" + labels: + {{- include "canvas-handler.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "canvas-handler.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/canvas-handler/values.yaml b/scripts/helmcharts/openreplay/charts/canvas-handler/values.yaml new file mode 100644 index 0000000000..776b470ada --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-handler/values.yaml @@ -0,0 +1,124 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: "{{ .Values.global.openReplayContainerRegistry }}/canvas-handler" + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "canvas-handler" +fullnameOverride: "canvas-handler-openreplay" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +securityContext: + runAsUser: 1001 + runAsGroup: 1001 +podSecurityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + fsGroupChangePolicy: "OnRootMismatch" + +#securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + ports: + http: 9000 + metrics: 8888 + +serviceMonitor: + enabled: true + additionalLabels: + release: observability + scrapeConfigs: + - port: metrics + honorLabels: true + interval: 15s + path: /metrics + scheme: http + scrapeTimeout: 10s + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + FS_CLEAN_HRS: 24 + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. + # For example + # name: openreplay-efs + name: "{{ .Values.global.pvcRWXName }}" + hostMountPath: /openreplay/storage/nfs + +persistence: {} + # # Spec of spec.template.spec.containers[*].volumeMounts + # mounts: + # - name: kafka-ssl + # mountPath: /opt/kafka/ssl + # # Spec of spec.template.spec.volumes + # volumes: + # - name: kafka-ssl + # secret: + # secretName: kafka-ssl + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/.helmignore b/scripts/helmcharts/openreplay/charts/canvas-maker/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/Chart.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/Chart.yaml new file mode 100644 index 0000000000..71b06e3a00 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: canvas-maker +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.16.0" diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/NOTES.txt new file mode 100644 index 0000000000..d0e81087b5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "canvas-maker.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "canvas-maker.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "canvas-maker.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "canvas-maker.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/_helpers.tpl new file mode 100644 index 0000000000..0fe32f9e74 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "canvas-maker.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "canvas-maker.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "canvas-maker.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "canvas-maker.labels" -}} +helm.sh/chart: {{ include "canvas-maker.chart" . }} +{{ include "canvas-maker.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "canvas-maker.selectorLabels" -}} +app.kubernetes.io/name: {{ include "canvas-maker.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "canvas-maker.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "canvas-maker.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/deployment.yaml new file mode 100644 index 0000000000..5526fb732e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/deployment.yaml @@ -0,0 +1,131 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "canvas-maker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "canvas-maker.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "canvas-maker.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "canvas-maker.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + shareProcessNamespace: true + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.healthCheck}} + {{- .Values.healthCheck | toYaml | nindent 10}} + {{- end}} + env: + - name: AWS_ACCESS_KEY_ID + {{- if .Values.global.s3.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.global.s3.existingSecret }} + key: access-key + {{- else }} + value: {{ .Values.global.s3.accessKey }} + {{- end }} + - name: AWS_SECRET_ACCESS_KEY + {{- if .Values.global.s3.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.global.s3.existingSecret }} + key: secret-key + {{- else }} + value: {{ .Values.global.s3.secretKey }} + {{- end }} + - name: AWS_ENDPOINT + value: '{{ .Values.global.s3.endpoint }}' + - name: AWS_REGION + value: '{{ .Values.global.s3.region }}' + - name: BUCKET_NAME + value: {{ .Values.global.s3.recordingsBucket }} + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }} + {{- range $key, $val := .Values.global.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + {{- range $key, $val := .Values.service.ports }} + - name: {{ $key }} + containerPort: {{ $val }} + protocol: TCP + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }} + {{- with .Values.persistence.mounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if eq (tpl .Values.pvc.name . ) "hostPath" }} + volumes: + {{- with .Values.persistence.volumes }} + {{- toYaml . | nindent 6 }} + {{- end }} + - name: datadir + hostPath: + # Ensure the file directory is created. + path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + {{- with .Values.persistence.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + - name: datadir + persistentVolumeClaim: + claimName: "{{ tpl .Values.pvc.name . }}" + {{- end }} + {{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/hpa.yaml new file mode 100644 index 0000000000..9b36a8ff64 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/hpa.yaml @@ -0,0 +1,29 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "canvas-maker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "canvas-maker.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/ingress.yaml new file mode 100644 index 0000000000..d5bc81c60d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/ingress.yaml @@ -0,0 +1,62 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "canvas-maker.fullname" . -}} +{{- $svcPort := .Values.service.ports.http -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/service.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/service.yaml new file mode 100644 index 0000000000..84ff898b3a --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "canvas-maker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + {{- range $key, $val := .Values.service.ports }} + - port: {{ $val }} + targetPort: {{ $key }} + protocol: TCP + name: {{ $key }} + {{- end}} + selector: + {{- include "canvas-maker.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceMonitor.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceMonitor.yaml new file mode 100644 index 0000000000..aac0b2ab02 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceMonitor.yaml @@ -0,0 +1,18 @@ +{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "canvas-maker.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.additionalLabels }} + {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + {{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }} + selector: + matchLabels: + {{- include "canvas-maker.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceaccount.yaml new file mode 100644 index 0000000000..54ae190b56 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "canvas-maker.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..d6200b3232 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "canvas-maker.fullname" . }}-test-connection" + labels: + {{- include "canvas-maker.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "canvas-maker.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/canvas-maker/values.yaml b/scripts/helmcharts/openreplay/charts/canvas-maker/values.yaml new file mode 100644 index 0000000000..f27d1ab444 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/canvas-maker/values.yaml @@ -0,0 +1,124 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: "{{ .Values.global.openReplayContainerRegistry }}/canvas-maker" + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "canvas-maker" +fullnameOverride: "canvas-maker-openreplay" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +securityContext: + runAsUser: 1001 + runAsGroup: 1001 +podSecurityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + fsGroupChangePolicy: "OnRootMismatch" + +#securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + ports: + http: 9000 + metrics: 8888 + +serviceMonitor: + enabled: true + additionalLabels: + release: observability + scrapeConfigs: + - port: metrics + honorLabels: true + interval: 15s + path: /metrics + scheme: http + scrapeTimeout: 10s + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + FS_CLEAN_HRS: 24 + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. 
+ # For example + # name: openreplay-efs + name: "{{ .Values.global.pvcRWXName }}" + hostMountPath: /openreplay/storage/nfs + +persistence: {} + # # Spec of spec.template.spec.containers[*].volumeMounts + # mounts: + # - name: kafka-ssl + # mountPath: /opt/kafka/ssl + # # Spec of spec.template.spec.volumes + # volumes: + # - name: kafka-ssl + # secret: + # secretName: kafka-ssl + +nodeSelector: {} + +tolerations: [] + +affinity: {}
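
The split introduced by this patch works as follows: canvas-handler consumes canvas screenshots plus the SessionEnd marker from the canvas-images topic, archives each canvas, and re-emits one SessionEnd per archive on TopicCanvasTrigger, temporarily carrying the archive name in the EncryptionKey field; canvas-maker consumes that topic and renders the replay video with ffmpeg from workDir/canvas/<sessionID>/. Below is a minimal, self-contained Go sketch of that handoff contract; the sessionEnd struct, topic string, and archive names are illustrative stand-ins, not the real openreplay/backend/pkg/messages types or Kafka wiring from the diff above.

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

// sessionEnd mirrors only the fields the two services exchange; the real message
// lives in openreplay/backend/pkg/messages. EncryptionKey is reused as the
// canvas-archive ("canvas mix") carrier between canvas-handler and canvas-maker.
type sessionEnd struct {
	SessionID     uint64
	EncryptionKey string
}

// canvasHandlerSide re-emits the SessionEnd once per prepared canvas archive,
// stashing the archive name in EncryptionKey before producing it.
func canvasHandlerSide(end sessionEnd, archives []string, produce func(topic string, msg sessionEnd)) {
	for _, name := range archives {
		end.EncryptionKey = name
		// "canvas-trigger" is illustrative; the real topic comes from cfg.TopicCanvasTrigger.
		produce("canvas-trigger", end)
	}
}

// canvasMakerSide reads the archive name back and derives the working directory,
// matching workDir + "/canvas/" + sessionID + "/" in canvas-maker/main.go.
func canvasMakerSide(workDir string, msg sessionEnd) (dir, canvasMix string, err error) {
	if msg.EncryptionKey == "" {
		return "", "", fmt.Errorf("no canvas mix for session %d", msg.SessionID)
	}
	dir = filepath.Join(workDir, "canvas", strconv.FormatUint(msg.SessionID, 10)) + "/"
	return dir, msg.EncryptionKey, nil
}

func main() {
	end := sessionEnd{SessionID: 42}
	// Hypothetical archive names; in the real service they come from srv.PrepareCanvas.
	canvasHandlerSide(end, []string{"canvas_1.tar", "canvas_2.tar"}, func(topic string, m sessionEnd) {
		if dir, mix, err := canvasMakerSide("/mnt/efs", m); err == nil {
			fmt.Printf("topic=%s session=%d dir=%s mix=%s\n", topic, m.SessionID, dir, mix)
		}
	})
}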