From da4e83d8219fb02fcec0ac2423ac4fde08adb657 Mon Sep 17 00:00:00 2001 From: gonzalezzfelipe Date: Thu, 7 Nov 2024 20:52:04 -0300 Subject: [PATCH] chore: Add resources to spec --- bootstrap/stage0/cluster.yml | 9 +++- bootstrap/stage1/crd.tf | 40 +++++++++++++++ bootstrap/stage1/metrics_server.tf | 7 +++ playbook/main.tf | 80 +++++++++++++++++++++++------- playbook/pod.yml | 22 +++++++- src/custom_resource.rs | 56 ++++++++++++++++----- 6 files changed, 181 insertions(+), 33 deletions(-) create mode 100644 bootstrap/stage1/metrics_server.tf diff --git a/bootstrap/stage0/cluster.yml b/bootstrap/stage0/cluster.yml index f9d5c17..2c6abd9 100644 --- a/bootstrap/stage0/cluster.yml +++ b/bootstrap/stage0/cluster.yml @@ -21,7 +21,7 @@ managedNodeGroups: instanceTypes: [ t3a.medium, t3.medium ] minSize: 0 maxSize: 2 - desiredCapacity: 1 + desiredCapacity: 2 spot: true availabilityZones: - us-east-1b @@ -34,3 +34,10 @@ fargateProfiles: - namespace: hydra-doom labels: run-on: fargate + - name: fp-dev + tags: + sundae-labs:cost-allocation:Service: hydra-doom + selectors: + - namespace: hydra-doom-dev + labels: + run-on: fargate diff --git a/bootstrap/stage1/crd.tf b/bootstrap/stage1/crd.tf index c05a97f..9c5a10c 100644 --- a/bootstrap/stage1/crd.tf +++ b/bootstrap/stage1/crd.tf @@ -70,6 +70,46 @@ resource "kubernetes_manifest" "customresourcedefinition_hydradoomnodes_hydra_do "nullable" = true "type" = "boolean" } + "resources" = { + "nullable" = true + "properties" = { + "limits" = { + "properties" = { + "cpu" = { + "type" = "string" + } + "memory" = { + "type" = "string" + } + } + "required" = [ + "cpu", + "memory", + ] + "type" = "object" + } + "requests" = { + "properties" = { + "cpu" = { + "type" = "string" + } + "memory" = { + "type" = "string" + } + } + "required" = [ + "cpu", + "memory", + ] + "type" = "object" + } + } + "required" = [ + "limits", + "requests", + ] + "type" = "object" + } "seedInput" = { "type" = "string" } diff --git a/bootstrap/stage1/metrics_server.tf b/bootstrap/stage1/metrics_server.tf new file mode 100644 index 0000000..151b7e6 --- /dev/null +++ b/bootstrap/stage1/metrics_server.tf @@ -0,0 +1,7 @@ +resource "helm_release" "metrics-server" { + name = "metrics-server" + repository = "https://kubernetes-sigs.github.io/metrics-server/" + chart = "metrics-server" + create_namespace = false + namespace = "kube-system" +} diff --git a/playbook/main.tf b/playbook/main.tf index 6311e33..dfb42c2 100644 --- a/playbook/main.tf +++ b/playbook/main.tf @@ -1,6 +1,8 @@ locals { - namespace = "hydra-doom" - operator_image = "ghcr.io/demeter-run/doom-patrol-operator:sha-f51cab3" + namespace = "hydra-doom" + namespace_dev = "hydra-doom-dev" + operator_image = "ghcr.io/demeter-run/doom-patrol-operator:sha-db2a685" + operator_image_dev = "ghcr.io/demeter-run/doom-patrol-operator:sha-0466797" } terraform { @@ -35,6 +37,12 @@ resource "kubernetes_namespace" "namespace" { } } +resource "kubernetes_namespace" "namespace_dev" { + metadata { + name = local.namespace_dev + } +} + variable "blockfrost_key" { type = string } @@ -51,28 +59,62 @@ variable "dmtr_port_name" { type = string } +variable "init_aws_access_key_id" { + type = string +} + +variable "init_aws_secret_access_key" { + type = string +} + module "stage1" { - source = "../bootstrap/stage1/" - efs_fs_id = "fs-0cfa4cc6888c81f30" + source = "../bootstrap/stage1/" } module "stage2" { source = "../bootstrap/stage2" depends_on = [module.stage1] - namespace = local.namespace - external_domain = "us-east-1.hydra-doom.sundae.fi" - 
operator_image = local.operator_image - hydra_node_image = "ghcr.io/cardano-scaling/hydra-node:unstable" - sidecar_image = "ghcr.io/demeter-run/doom-patrol-hydra:803df77809e3b5d65ad752603257b31ee05cf481" - open_head_image = "ghcr.io/demeter-run/doom-patrol-hydra:803df77809e3b5d65ad752603257b31ee05cf481" - control_plane_image = "ghcr.io/demeter-run/doom-patrol-hydra:803df77809e3b5d65ad752603257b31ee05cf481" - blockfrost_key = var.blockfrost_key - external_port = 80 - admin_key_path = "${path.module}/admin.sk" - admin_addr = "addr_test1vpgcjapuwl7gfnzhzg6svtj0ph3gxu8kyuadudmf0kzsksqrfugfc" - dmtr_project_id = var.dmtr_project_id - dmtr_api_key = var.dmtr_api_key - dmtr_port_name = var.dmtr_port_name - hydra_scripts_tx_id = "03f8deb122fbbd98af8eb58ef56feda37728ec957d39586b78198a0cf624412a" + namespace = local.namespace + external_domain = "us-east-1.hydra-doom.sundae.fi" + operator_image = local.operator_image + hydra_node_image = "ghcr.io/cardano-scaling/hydra-node:latest" + sidecar_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + open_head_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + control_plane_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + blockfrost_key = var.blockfrost_key + external_port = 80 + admin_key_path = "${path.module}/admin.sk" + admin_addr = "addr_test1vpgcjapuwl7gfnzhzg6svtj0ph3gxu8kyuadudmf0kzsksqrfugfc" + dmtr_project_id = var.dmtr_project_id + dmtr_api_key = var.dmtr_api_key + dmtr_port_name = var.dmtr_port_name + hydra_scripts_tx_id = "03f8deb122fbbd98af8eb58ef56feda37728ec957d39586b78198a0cf624412a" + init_image = "" + init_aws_access_key_id = "" + init_aws_secret_access_key = "" +} + +module "stage2dev" { + source = "../bootstrap/stage2" + depends_on = [module.stage1] + + namespace = local.namespace_dev + external_domain = "dev.hydra-doom.sundae.fi" + operator_image = local.operator_image_dev + hydra_node_image = "ghcr.io/cardano-scaling/hydra-node:latest" + sidecar_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + open_head_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + control_plane_image = "ghcr.io/demeter-run/doom-patrol-hydra:ab28d9f920538bd3b0523448fba574599e328bc9" + blockfrost_key = var.blockfrost_key + external_port = 80 + admin_key_path = "${path.module}/admin.sk" + admin_addr = "addr_test1vpgcjapuwl7gfnzhzg6svtj0ph3gxu8kyuadudmf0kzsksqrfugfc" + dmtr_project_id = var.dmtr_project_id + dmtr_api_key = var.dmtr_api_key + dmtr_port_name = var.dmtr_port_name + hydra_scripts_tx_id = "03f8deb122fbbd98af8eb58ef56feda37728ec957d39586b78198a0cf624412a" + init_image = "ghcr.io/demeter-run/doom-patrol-init:b7b4fc499b5274cd71b6b72f93ab4ba8199437fe" + init_aws_access_key_id = var.init_aws_access_key_id + init_aws_secret_access_key = var.init_aws_secret_access_key } diff --git a/playbook/pod.yml b/playbook/pod.yml index 091f7c3..f784ecf 100644 --- a/playbook/pod.yml +++ b/playbook/pod.yml @@ -10,9 +10,29 @@ apiVersion: hydra.doom/v1alpha1 kind: HydraDoomNode metadata: - name: a00000 + name: a00001 namespace: hydra-doom spec: offline: true seedInput: _unused commitInputs: [] +--- +apiVersion: hydra.doom/v1alpha1 +kind: HydraDoomNode +metadata: + name: a00002 + namespace: hydra-doom-dev +spec: + offline: true + seedInput: _unused + commitInputs: [] +--- +apiVersion: hydra.doom/v1alpha1 +kind: HydraDoomNode +metadata: + name: testoffline + namespace: 
hydra-doom-dev
+spec:
+  offline: true
+  seedInput: _unused
+  commitInputs: []
diff --git a/src/custom_resource.rs b/src/custom_resource.rs
index 1741f86..4cef470 100644
--- a/src/custom_resource.rs
+++ b/src/custom_resource.rs
@@ -1,14 +1,17 @@
-use k8s_openapi::api::{
-    apps::v1::{Deployment, DeploymentSpec},
-    core::v1::{
-        ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, EmptyDirVolumeSource, PodSpec,
-        PodTemplateSpec, SecretVolumeSource, Service, ServicePort, ServiceSpec, Volume,
-        VolumeMount,
-    },
-    networking::v1::{
-        HTTPIngressPath, HTTPIngressRuleValue, Ingress, IngressBackend, IngressRule,
-        IngressServiceBackend, IngressSpec, ServiceBackendPort,
+use k8s_openapi::{
+    api::{
+        apps::v1::{Deployment, DeploymentSpec},
+        core::v1::{
+            ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, EmptyDirVolumeSource,
+            PodSpec, PodTemplateSpec, ResourceRequirements, SecretVolumeSource, Service,
+            ServicePort, ServiceSpec, Volume, VolumeMount,
+        },
+        networking::v1::{
+            HTTPIngressPath, HTTPIngressRuleValue, Ingress, IngressBackend, IngressRule,
+            IngressServiceBackend, IngressSpec, ServiceBackendPort,
+        },
     },
+    apimachinery::pkg::api::resource::Quantity,
 };
 use kube::{api::ObjectMeta, CustomResource, ResourceExt};
 use schemars::JsonSchema;
@@ -21,6 +24,26 @@ use super::controller::K8sConstants;
 
 pub static HYDRA_DOOM_NODE_FINALIZER: &str = "hydradoomnode/finalizer";
 
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+pub struct ResourcesInner {
+    pub cpu: String,
+    pub memory: String,
+}
+impl From<&ResourcesInner> for BTreeMap<String, Quantity> {
+    fn from(value: &ResourcesInner) -> Self {
+        BTreeMap::from([
+            ("cpu".to_string(), Quantity(value.cpu.clone())),
+            ("memory".to_string(), Quantity(value.memory.clone())),
+        ])
+    }
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+pub struct Resources {
+    pub requests: ResourcesInner,
+    pub limits: ResourcesInner,
+}
+
 #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
 #[kube(
     kind = "HydraDoomNode",
@@ -46,6 +69,7 @@ pub struct HydraDoomNodeSpec {
     pub commit_inputs: Vec<String>,
     pub start_chain_from: Option<String>,
     pub asleep: Option<bool>,
+    pub resources: Option<Resources>,
 }
 
 #[derive(Deserialize, Serialize, Clone, Default, Debug, JsonSchema)]
@@ -211,7 +235,15 @@ impl HydraDoomNode {
                         ..Default::default()
                     },
                 ]),
-                resources: None, // TODO: This should be parameterizable
+                resources: self
+                    .spec
+                    .resources
+                    .as_ref()
+                    .map(|resources| ResourceRequirements {
+                        requests: Some((&resources.requests).into()),
+                        limits: Some((&resources.limits).into()),
+                        ..Default::default()
+                    }),
                 ..Default::default()
             },
             Container {
@@ -277,7 +309,7 @@ impl HydraDoomNode {
                         ..Default::default()
                     },
                 ]),
-                resources: None, // TODO: Parametrize this
+                resources: None,
                 ..Default::default()
             });
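
Usage sketch (illustrative, not part of the patch): with the new optional "resources" field wired from the CRD through the operator, a HydraDoomNode manifest can set explicit requests and limits for the hydra-node container. Per the schema added in crd.tf, when "resources" is present both "requests" and "limits" are required, each with "cpu" and "memory" quantity strings. The name and values below are hypothetical.

apiVersion: hydra.doom/v1alpha1
kind: HydraDoomNode
metadata:
  name: example-sized-node    # hypothetical name
  namespace: hydra-doom-dev
spec:
  offline: true
  seedInput: _unused
  commitInputs: []
  resources:                  # omit entirely to keep the previous behaviour (no requests/limits set)
    requests:
      cpu: 500m               # illustrative values only
      memory: 512Mi
    limits:
      cpu: "1"
      memory: 1Gi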