From b7b4fc499b5274cd71b6b72f93ab4ba8199437fe Mon Sep 17 00:00:00 2001
From: gonzalezzfelipe
Date: Thu, 7 Nov 2024 18:57:49 -0300
Subject: [PATCH] feat: Init from snapshot

---
 .github/workflows/docker.yml   |  1 +
 .github/workflows/hydra.yml    |  5 +----
 .github/workflows/init.yml     |  5 +----
 bootstrap/stage1/efs.tf        | 36 ----------------------------------
 bootstrap/stage1/main.tf       |  4 ----
 bootstrap/stage2/deployment.tf | 20 ++++++++++++++++++++
 bootstrap/stage2/main.tf       | 17 +++++++++++++++++
 docker/dockerfile.init         |  9 ++++++++-
 docker/entrypoint.sh           | 12 ++++++++++--
 src/config.rs                  | 10 ++++++++++
 src/custom_resource.rs         | 36 +++++++++++++++++++++++++++-------
 11 files changed, 97 insertions(+), 58 deletions(-)
 delete mode 100644 bootstrap/stage1/efs.tf

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index dbcdc0f..8376bef 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -1,5 +1,6 @@
 name: Docker
 on:
+  workflow_dispatch: {}
   push:
     branches:
       - "main"
diff --git a/.github/workflows/hydra.yml b/.github/workflows/hydra.yml
index ec09785..5a21f4f 100644
--- a/.github/workflows/hydra.yml
+++ b/.github/workflows/hydra.yml
@@ -7,10 +7,7 @@ on:
     paths:
       - ".github/workflows/hydra.yml"
       - "docker/dockerfile.hydra"
-  workflow_dispatch:
-    inputs:
-      mumak_version:
-        required: false
+  workflow_dispatch: {}
 
 jobs:
   build-images:
diff --git a/.github/workflows/init.yml b/.github/workflows/init.yml
index 9155568..7d26b5f 100644
--- a/.github/workflows/init.yml
+++ b/.github/workflows/init.yml
@@ -7,10 +7,7 @@ on:
     paths:
       - ".github/workflows/init.yml"
      - "docker/dockerfile.init"
-  workflow_dispatch:
-    inputs:
-      mumak_version:
-        required: false
+  workflow_dispatch: {}
 
 jobs:
   build-images:
diff --git a/bootstrap/stage1/efs.tf b/bootstrap/stage1/efs.tf
deleted file mode 100644
index 67bf724..0000000
--- a/bootstrap/stage1/efs.tf
+++ /dev/null
@@ -1,36 +0,0 @@
-resource "kubernetes_storage_class" "efs_storage_class" {
-  metadata {
-    name = "efs-sc"
-  }
-  storage_provisioner = "efs.csi.aws.com"
-  parameters = {
-    provisioningMode = "efs-ap"
-    fileSystemId = var.efs_fs_id
-    directoryPerms = "777"
-    basePath = "/hydra-node-persistance"
-    subPathPattern = "$${.PVC.name}"
-    ensureUniqueDirectory = "true"
-  }
-}
-
-resource "kubernetes_persistent_volume" "efs_pv" {
-  metadata {
-    name = "hydra-doom-persistence"
-  }
-
-  spec {
-    capacity = {
-      storage = "100Gi"
-    }
-    volume_mode = "Filesystem"
-    access_modes = ["ReadWriteMany"]
-    persistent_volume_reclaim_policy = "Retain"
-    storage_class_name = "efs-cs"
-    persistent_volume_source {
-      csi {
-        driver = "efs.csi.aws.com"
-        volume_handle = var.efs_fs_id
-      }
-    }
-  }
-}
diff --git a/bootstrap/stage1/main.tf b/bootstrap/stage1/main.tf
index b8533e9..e69de29 100644
--- a/bootstrap/stage1/main.tf
+++ b/bootstrap/stage1/main.tf
@@ -1,4 +0,0 @@
-variable "efs_fs_id" {
-  type = string
-  description = "ID of EFS resource to use as persistance."
-}
diff --git a/bootstrap/stage2/deployment.tf b/bootstrap/stage2/deployment.tf
index 97c7dca..168bd28 100644
--- a/bootstrap/stage2/deployment.tf
+++ b/bootstrap/stage2/deployment.tf
@@ -105,6 +105,26 @@ resource "kubernetes_deployment_v1" "operator" {
             value = var.dmtr_port_name
           }
 
+          env {
+            name = "INIT_IMAGE"
+            value = var.init_image
+          }
+
+          env {
+            name = "BUCKET"
+            value = var.bucket
+          }
+
+          env {
+            name = "INIT_AWS_ACCESS_KEY_ID"
+            value = var.init_aws_access_key_id
+          }
+
+          env {
+            name = "INIT_AWS_SECRET_ACCESS_KEY"
+            value = var.init_aws_secret_access_key
+          }
+
           resources {
             limits = {
               cpu = var.resources.limits.cpu
diff --git a/bootstrap/stage2/main.tf b/bootstrap/stage2/main.tf
index 2dc6437..77b4d9a 100644
--- a/bootstrap/stage2/main.tf
+++ b/bootstrap/stage2/main.tf
@@ -69,6 +69,23 @@ variable "dmtr_port_name" {
   type = string
 }
 
+variable "init_image" {
+  type = string
+}
+
+variable "bucket" {
+  type = string
+  default = "hydradoomsnapshots"
+}
+
+variable "init_aws_access_key_id" {
+  type = string
+}
+
+variable "init_aws_secret_access_key" {
+  type = string
+}
+
 variable "tolerations" {
   type = list(object({
     effect = string
diff --git a/docker/dockerfile.init b/docker/dockerfile.init
index e8c4860..45dc379 100644
--- a/docker/dockerfile.init
+++ b/docker/dockerfile.init
@@ -1,4 +1,11 @@
 FROM amazon/aws-cli
-RUN yum update -y && yum install -y tar gzip
+RUN yum update -y && yum install -y tar gzip unzip curl
+
+WORKDIR /var/hydra-node
+RUN curl -L -O https://github.com/cardano-scaling/hydra/releases/download/0.19.0/hydra-x86_64-linux-0.19.0.zip
+RUN unzip -d bin hydra-x86_64-linux-0.19.0.zip
+RUN cp /var/hydra-node/bin/hydra-node /hydra-node
+RUN chmod +x /hydra-node
+
 COPY docker/entrypoint.sh /entrypoint.sh
 ENTRYPOINT ["sh", "/entrypoint.sh"]
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 4a5779e..3689b49 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -1,2 +1,10 @@
-aws s3 cp "s3://$BUCKET/$KEY" "$DATA_DIR"
-tar -xzvf "$DATA_DIR/$KEY" -C "$DATA_DIR"
+#!/bin/sh
+if aws s3 ls "s3://$BUCKET/$KEY" > /dev/null 2>&1; then
+  echo "Snapshot exists, downloading..."
+  aws s3 cp "s3://$BUCKET/$KEY" "$DATA_DIR"
+  tar -xzvf "$DATA_DIR/$KEY" -C "$DATA_DIR"
+else
+  echo "Snapshot does not exist, generating keys..."
+  mkdir "$DATA_DIR/keys"
+  /hydra-node gen-hydra-key --output-file "$DATA_DIR/keys/hydra"
+fi
diff --git a/src/config.rs b/src/config.rs
index e0eddce..3acae87 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -12,6 +12,7 @@ pub fn get_config() -> &'static Config {
 #[derive(Debug, Clone)]
 pub struct Config {
     pub image: String,
+    pub init_image: String,
     pub open_head_image: String,
     pub sidecar_image: String,
     pub configmap: String,
@@ -24,6 +25,9 @@ pub struct Config {
     pub dmtr_project_id: String,
     pub dmtr_api_key: String,
     pub dmtr_port_name: String,
+    pub bucket: String,
+    pub init_aws_access_key_id: String,
+    pub init_aws_secret_access_key: String,
 }
 
 impl Config {
@@ -43,6 +47,12 @@ impl Config {
             dmtr_project_id: env::var("DMTR_PROJECT_ID").expect("Missing DMTR_PROJECT_ID env var."),
             dmtr_api_key: env::var("DMTR_API_KEY").expect("Missing DMTR_API_KEY env var."),
             dmtr_port_name: env::var("DMTR_PORT_NAME").expect("Missing DMTR_PORT_NAME env var."),
+            init_image: env::var("INIT_IMAGE").expect("Missing INIT_IMAGE env var."),
+            bucket: env::var("BUCKET").expect("Missing BUCKET env var."),
+            init_aws_access_key_id: env::var("INIT_AWS_ACCESS_KEY_ID")
+                .expect("Missing INIT_AWS_ACCESS_KEY_ID env var."),
+            init_aws_secret_access_key: env::var("INIT_AWS_SECRET_ACCESS_KEY")
+                .expect("Missing INIT_AWS_SECRET_ACCESS_KEY env var."),
         }
     }
 }
diff --git a/src/custom_resource.rs b/src/custom_resource.rs
index 1741f86..ccba783 100644
--- a/src/custom_resource.rs
+++ b/src/custom_resource.rs
@@ -1,8 +1,8 @@
 use k8s_openapi::api::{
     apps::v1::{Deployment, DeploymentSpec},
     core::v1::{
-        ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, EmptyDirVolumeSource, PodSpec,
-        PodTemplateSpec, SecretVolumeSource, Service, ServicePort, ServiceSpec, Volume,
+        ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, EmptyDirVolumeSource, EnvVar,
+        PodSpec, PodTemplateSpec, SecretVolumeSource, Service, ServicePort, ServiceSpec, Volume,
         VolumeMount,
     },
     networking::v1::{
@@ -327,11 +327,33 @@ impl HydraDoomNode {
                 spec: Some(PodSpec {
                     init_containers: Some(vec![Container {
                         name: "init".to_string(),
-                        image: Some(config.image.clone()),
-                        args: Some(vec![
-                            "gen-hydra-key".to_string(),
-                            "--output-file".to_string(),
-                            format!("{}/hydra", constants.data_dir),
+                        image: Some(config.init_image.clone()),
+                        env: Some(vec![
+                            EnvVar {
+                                name: "BUCKET".to_string(),
+                                value: Some(config.bucket.clone()),
+                                ..Default::default()
+                            },
+                            EnvVar {
+                                name: "KEY".to_string(),
+                                value: Some(format!("{}.tar.gz", self.name_any())),
+                                ..Default::default()
+                            },
+                            EnvVar {
+                                name: "DATA_DIR".to_string(),
+                                value: Some(constants.data_dir.clone()),
+                                ..Default::default()
+                            },
+                            EnvVar {
+                                name: "AWS_ACCESS_KEY_ID".to_string(),
+                                value: Some(config.init_aws_access_key_id.clone()),
+                                ..Default::default()
+                            },
+                            EnvVar {
+                                name: "AWS_SECRET_ACCESS_KEY".to_string(),
+                                value: Some(config.init_aws_secret_access_key.clone()),
+                                ..Default::default()
+                            },
                         ]),
                         volume_mounts: Some(vec![VolumeMount {
                             name: "data".to_string(),