diff --git a/ansible/roles/crafty_controller_deploy/defaults/main.yml b/ansible/roles/crafty_controller_deploy/defaults/main.yml deleted file mode 100644 index 1a201e8b..00000000 --- a/ansible/roles/crafty_controller_deploy/defaults/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -crafty_controller_deploy_kubeconfig: >- - {{ k3s_kubeconfig_path | default('', true) }} -crafty_controller_deploy_cluster_name: >- - {{ k3s_cluster_name | default('', true) }} -crafty_controller_deploy_context: >- - {{ k3s_context_name | default('', true) }} -crafty_controller_deploy_namespace: "apps-crafty-controller" -crafty_controller_deploy_argocd_namespace: "argocd" -crafty_controller_deploy_app_name: "crafty-controller" -crafty_controller_deploy_project: "coachlight-k3s-apps" -crafty_controller_deploy_apply_objects: true -crafty_controller_deploy_repo_url: >- - https://github.com/SRF-Audio/utility-scripts -crafty_controller_deploy_target_revision: "main" -crafty_controller_deploy_path: "k8s/crafty_controller" diff --git a/ansible/roles/crafty_controller_deploy/meta/argument_specs.yml b/ansible/roles/crafty_controller_deploy/meta/argument_specs.yml deleted file mode 100644 index 572696ec..00000000 --- a/ansible/roles/crafty_controller_deploy/meta/argument_specs.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -argument_specs: - main: - short_description: Deploy Crafty Controller via ArgoCD Application - description: > - Deploys an ArgoCD Application that points to k8s/crafty_controller/ - containing raw Kubernetes manifests including Deployment, Service, - Ingress, PVCs, and OnePasswordItem CRD for secrets. - options: - crafty_controller_deploy_kubeconfig: - type: str - required: true - description: Path to kubeconfig file for cluster access. - crafty_controller_deploy_context: - type: str - required: true - description: Kubernetes context to use from kubeconfig. - crafty_controller_deploy_cluster_name: - type: str - required: false - description: Name of the target k8s cluster for artifact metadata. - crafty_controller_deploy_namespace: - type: str - required: false - description: Namespace where Crafty Controller will be deployed. - crafty_controller_deploy_argocd_namespace: - type: str - required: false - description: Namespace where ArgoCD is running. - crafty_controller_deploy_app_name: - type: str - required: false - description: Name of the ArgoCD Application. - crafty_controller_deploy_project: - type: str - required: false - description: ArgoCD project name. - crafty_controller_deploy_repo_url: - type: str - required: false - description: Git repository URL containing the manifests. - crafty_controller_deploy_target_revision: - type: str - required: false - description: Git branch/tag/commit to deploy from. - crafty_controller_deploy_path: - type: str - required: false - description: Path within the git repository to the manifests. - crafty_controller_deploy_apply_objects: - type: bool - required: false - description: Whether to apply the ArgoCD Application manifest. 
diff --git a/ansible/roles/crafty_controller_deploy/tasks/main.yml b/ansible/roles/crafty_controller_deploy/tasks/main.yml deleted file mode 100644 index 1066f5ab..00000000 --- a/ansible/roles/crafty_controller_deploy/tasks/main.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -- name: Assert required crafty_controller_deploy inputs are defined - ansible.builtin.assert: - that: - - crafty_controller_deploy_kubeconfig is defined - - crafty_controller_deploy_context is defined - - crafty_controller_deploy_kubeconfig | string | length > 0 - - crafty_controller_deploy_context | length > 0 - fail_msg: > - crafty_controller_deploy_kubeconfig and - crafty_controller_deploy_context must be defined for - crafty_controller_deploy. - -- name: Render crafty_controller_deploy ArgoCD Application manifest - ansible.builtin.template: - src: "{{ role_path }}/templates/application.yml.j2" - dest: "{{ role_path }}/templates/application.yml" - mode: "0644" - -- name: Set static path to pass to k8s_object_manager role - ansible.builtin.set_fact: - crafty_controller_deploy_manifest: >- - {{ role_path }}/templates/application.yml - -- name: Apply Crafty Controller ArgoCD Application manifest - ansible.builtin.include_role: - name: k8s_object_manager - vars: - k8s_object_manager_kubeconfig: >- - {{ crafty_controller_deploy_kubeconfig }} - k8s_object_manager_context: "{{ crafty_controller_deploy_context }}" - k8s_object_manager_state: present - k8s_object_manager_src: "{{ crafty_controller_deploy_manifest }}" - when: crafty_controller_deploy_apply_objects | bool - -- name: Capture k8s_object_manager result if k8s objects were applied - ansible.builtin.set_fact: - crafty_controller_deploy_k8s_result: >- - {{ k8s_object_manager_result }} - when: crafty_controller_deploy_apply_objects | bool - -- name: Set empty k8s result if objects were not applied - ansible.builtin.set_fact: - crafty_controller_deploy_k8s_result: {} - when: not (crafty_controller_deploy_apply_objects | bool) - -- name: Wait for Crafty Controller ArgoCD Application to be Synced and not Degraded - kubernetes.core.k8s_info: - kubeconfig: "{{ crafty_controller_deploy_kubeconfig }}" - context: "{{ crafty_controller_deploy_context }}" - api_version: argoproj.io/v1alpha1 - kind: Application - namespace: "{{ crafty_controller_deploy_argocd_namespace }}" - name: "{{ crafty_controller_deploy_app_name }}" - register: crafty_controller_app_status - until: - - (crafty_controller_app_status.resources | default([])) | length > 0 - - >- - (crafty_controller_app_status.resources[0].status.sync.status | default('')) == - 'Synced' - - >- - (crafty_controller_app_status.resources[0].status.health.status | default('')) != - 'Degraded' - retries: 60 - delay: 5 - when: crafty_controller_deploy_apply_objects | bool - -- name: Persist crafty_controller_deploy artifacts - ansible.builtin.include_role: - name: role_artifacts - vars: - # noqa: var-naming - calling_role_name: "crafty_controller_deploy" - calling_role_artifacts_inputs: - k3s_cluster_name: >- - {{ - crafty_controller_deploy_cluster_name | - default(k3s_cluster_name | default('', true), true) - }} - app_name: "{{ crafty_controller_deploy_app_name }}" - namespace: "{{ crafty_controller_deploy_namespace }}" - argocd_namespace: "{{ crafty_controller_deploy_argocd_namespace }}" - repo_url: "{{ crafty_controller_deploy_repo_url }}" - target_revision: "{{ crafty_controller_deploy_target_revision }}" - path: "{{ crafty_controller_deploy_path }}" - application_result: "{{ crafty_controller_deploy_k8s_result }}" diff --git 
a/ansible/roles/crafty_controller_deploy/templates/application.yml.j2 b/ansible/roles/crafty_controller_deploy/templates/application.yml.j2 deleted file mode 100644 index d128afa2..00000000 --- a/ansible/roles/crafty_controller_deploy/templates/application.yml.j2 +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: {{ crafty_controller_deploy_app_name }} - namespace: {{ crafty_controller_deploy_argocd_namespace }} -spec: - project: {{ crafty_controller_deploy_project }} - source: - repoURL: {{ crafty_controller_deploy_repo_url }} - targetRevision: {{ crafty_controller_deploy_target_revision }} - path: {{ crafty_controller_deploy_path }} - destination: - server: https://kubernetes.default.svc - namespace: {{ crafty_controller_deploy_namespace }} - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true diff --git a/ansible/roles/nfs_provisioner_deploy/defaults/main.yml b/ansible/roles/nfs_provisioner_deploy/defaults/main.yml deleted file mode 100644 index da2870a5..00000000 --- a/ansible/roles/nfs_provisioner_deploy/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -nfs_provisioner_deploy_argocd_namespace: argocd -nfs_provisioner_deploy_argocd_application_path: >- - {{ playbook_dir }}/../argocd/nfs_provisioner/nfs_provisioner.yml -nfs_provisioner_deploy_argocd_app_name: nfs-provisioner -nfs_provisioner_deploy_storageclass_paths: - - "{{ playbook_dir }}/../k8s/storageclasses/nfs-synology-delete.yml" - - "{{ playbook_dir }}/../k8s/storageclasses/nfs-synology-retain.yml" - - "{{ playbook_dir }}/../k8s/storageclasses/nfs-static-retain.yml" -nfs_provisioner_deploy_wait_retries: 30 -nfs_provisioner_deploy_wait_delay: 10 diff --git a/ansible/roles/nfs_provisioner_deploy/meta/argument_specs.yml b/ansible/roles/nfs_provisioner_deploy/meta/argument_specs.yml deleted file mode 100644 index 49bcd815..00000000 --- a/ansible/roles/nfs_provisioner_deploy/meta/argument_specs.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -argument_specs: - main: - short_description: Deploy NFS provisioner via ArgoCD Application - description: > - Applies a static ArgoCD Application manifest for - nfs-subdir-external-provisioner and ensures required StorageClasses exist. - options: - nfs_provisioner_deploy_kubeconfig: - type: raw - required: true - description: >- - Kubeconfig path or dict used to connect to the target cluster. - nfs_provisioner_deploy_context: - type: str - required: true - description: >- - Kubernetes context name to use with the provided kubeconfig. - nfs_provisioner_deploy_argocd_namespace: - type: str - required: false - description: Namespace where ArgoCD is deployed. - nfs_provisioner_deploy_argocd_application_path: - type: str - required: false - description: Path to the ArgoCD Application manifest file. - nfs_provisioner_deploy_argocd_app_name: - type: str - required: false - description: Name of the ArgoCD Application resource. - nfs_provisioner_deploy_storageclass_paths: - type: list - elements: str - required: false - description: List of StorageClass manifest file paths to apply. - nfs_provisioner_deploy_wait_retries: - type: int - required: false - description: Number of retries for polling ArgoCD Application status. - nfs_provisioner_deploy_wait_delay: - type: int - required: false - description: Delay in seconds between polling attempts. 
diff --git a/ansible/roles/nfs_provisioner_deploy/tasks/main.yml b/ansible/roles/nfs_provisioner_deploy/tasks/main.yml deleted file mode 100644 index c7cbfa28..00000000 --- a/ansible/roles/nfs_provisioner_deploy/tasks/main.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- name: Assert required inputs are defined - ansible.builtin.assert: - that: - - nfs_provisioner_deploy_kubeconfig is defined - - nfs_provisioner_deploy_context is defined - - nfs_provisioner_deploy_kubeconfig | string | length > 0 - - nfs_provisioner_deploy_context | length > 0 - fail_msg: >- - nfs_provisioner_deploy_kubeconfig and nfs_provisioner_deploy_context - must be defined and non-empty. - -- name: Assert ArgoCD Application manifest exists - ansible.builtin.stat: - path: "{{ nfs_provisioner_deploy_argocd_application_path }}" - register: nfs_provisioner_deploy_argocd_app_stat - failed_when: not nfs_provisioner_deploy_argocd_app_stat.stat.exists - -- name: Check if StorageClass manifests exist - ansible.builtin.stat: - path: "{{ item }}" - loop: "{{ nfs_provisioner_deploy_storageclass_paths }}" - register: nfs_provisioner_deploy_sc_stat - -- name: Verify all StorageClass manifests exist - ansible.builtin.assert: - that: - - item.stat.exists - fail_msg: >- - StorageClass manifest file does not exist: {{ item.item }} - loop: "{{ nfs_provisioner_deploy_sc_stat.results }}" - loop_control: - label: "{{ item.item }}" - -- name: Apply ArgoCD Application manifest - kubernetes.core.k8s: - kubeconfig: "{{ nfs_provisioner_deploy_kubeconfig }}" - context: "{{ nfs_provisioner_deploy_context }}" - state: present - src: "{{ nfs_provisioner_deploy_argocd_application_path }}" - -- name: Wait for ArgoCD Application to become Healthy and Synced - kubernetes.core.k8s_info: - kubeconfig: "{{ nfs_provisioner_deploy_kubeconfig }}" - context: "{{ nfs_provisioner_deploy_context }}" - kind: Application - api_version: argoproj.io/v1alpha1 - namespace: "{{ nfs_provisioner_deploy_argocd_namespace }}" - name: "{{ nfs_provisioner_deploy_argocd_app_name }}" - register: nfs_provisioner_deploy_app_info - until: - - nfs_provisioner_deploy_app_info.resources is defined - - nfs_provisioner_deploy_app_info.resources | length > 0 - - nfs_provisioner_deploy_app_info.resources[0].status is defined - - nfs_provisioner_deploy_app_info.resources[0].status.sync is defined - - >- - nfs_provisioner_deploy_app_info.resources[0].status.sync.status - is defined - - >- - nfs_provisioner_deploy_app_info.resources[0].status.sync.status - == "Synced" - - nfs_provisioner_deploy_app_info.resources[0].status.health is defined - - >- - nfs_provisioner_deploy_app_info.resources[0].status.health.status - is defined - - >- - nfs_provisioner_deploy_app_info.resources[0].status.health.status - == "Healthy" - retries: "{{ nfs_provisioner_deploy_wait_retries }}" - delay: "{{ nfs_provisioner_deploy_wait_delay }}" - -- name: Set nfs_provisioner_deploy artifacts - ansible.builtin.set_fact: - nfs_provisioner_deploy_artifacts: - kubeconfig: "{{ nfs_provisioner_deploy_kubeconfig }}" - context: "{{ nfs_provisioner_deploy_context }}" - argocd_application_name: "{{ nfs_provisioner_deploy_argocd_app_name }}" - argocd_namespace: "{{ nfs_provisioner_deploy_argocd_namespace }}" - storageclasses: - - nfs-synology-delete - - nfs-synology-retain - - nfs-static-retain - -- name: Persist nfs_provisioner_deploy artifacts - ansible.builtin.include_role: - name: role_artifacts - vars: - # noqa: var-naming - calling_role_name: nfs_provisioner_deploy - calling_role_artifacts_inputs: "{{ 
nfs_provisioner_deploy_artifacts }}" diff --git a/ansible/site.yml b/ansible/site.yml index 768e4576..1a40ecd8 100644 --- a/ansible/site.yml +++ b/ansible/site.yml @@ -82,11 +82,6 @@ 'NextDNS', field='homepage_api_token', vault='HomeLab') }} - crafty_controller_deploy_sfroeber_password: >- - {{ lookup('community.general.onepassword', - 'Crafty Controller', - field='password', - vault='HomeLab') }} proxmox_tailscale_setup_auth_key: >- {{ lookup('community.general.onepassword', 'Tailscale', diff --git a/argocd/apps/apps/frigate.yml b/argocd/apps/apps/frigate.yml new file mode 100644 index 00000000..50b08644 --- /dev/null +++ b/argocd/apps/apps/frigate.yml @@ -0,0 +1,55 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: frigate + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "30" +spec: + project: coachlight-k3s-apps + source: + repoURL: https://blakeblackshear.github.io/blakeshome-charts/ + chart: frigate + targetRevision: "7.8.0" + helm: + valuesObject: + service: + type: ClusterIP + annotations: + tailscale.com/expose: "true" + tailscale.com/tags: "tag:k8s,tag:apps" + tailscale.com/hostname: "frigate" + + persistence: + config: + enabled: true + storageClass: "nfs-synology-retain" + media: + enabled: true + storageClass: "nfs-synology-retain" + + ingress: + enabled: true + className: "" + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/name: "Frigate" + gethomepage.dev/description: "NVR with realtime object detection" + gethomepage.dev/group: "Cameras" + gethomepage.dev/icon: "frigate.png" + hosts: + - host: frigate + paths: + - path: / + pathType: Prefix + + destination: + server: https://kubernetes.default.svc + namespace: frigate + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/ansible/roles/crafty_controller_deploy/templates/application.yml b/argocd/apps/platform/netbox-secrets.yml similarity index 68% rename from ansible/roles/crafty_controller_deploy/templates/application.yml rename to argocd/apps/platform/netbox-secrets.yml index e8dcef1f..3e76f62e 100644 --- a/ansible/roles/crafty_controller_deploy/templates/application.yml +++ b/argocd/apps/platform/netbox-secrets.yml @@ -1,17 +1,20 @@ +--- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: crafty-controller + name: netbox-secrets namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "10" spec: - project: coachlight-k3s-apps + project: coachlight-k3s-infra source: repoURL: https://github.com/SRF-Audio/utility-scripts targetRevision: main - path: k8s/crafty_controller + path: k8s/netbox destination: server: https://kubernetes.default.svc - namespace: apps-crafty-controller + namespace: infra-netbox syncPolicy: automated: prune: true diff --git a/argocd/apps/platform/netbox.yml b/argocd/apps/platform/netbox.yml new file mode 100644 index 00000000..d92d31a5 --- /dev/null +++ b/argocd/apps/platform/netbox.yml @@ -0,0 +1,87 @@ +--- +# NOTE: This NetBox deployment currently requires secrets to be managed +# outside of this manifest. The netbox_deploy Ansible role can be used +# to template credentials until OnePassword integration is completed. 
+# TODO: Migrate to OnePassword CRDs or Helm chart's existingSecret support +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: netbox + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "20" +spec: + project: coachlight-k3s-infra + source: + repoURL: https://netbox-community.github.io/netbox-chart + chart: netbox + targetRevision: "5.0.0-beta.145" + helm: + valuesObject: + superuser: + email: "admin@netbox.local" + # TODO: Configure superuser.password field + # Options: existingSecret, OnePassword CRD, or templated value + + # TODO: Configure secretKey field (Django secret key) + # Options: existingSecret, OnePassword CRD, or templated value + + persistence: + enabled: true + storageClass: "nfs-synology-retain" + + # Use existing PostgreSQL instance + postgresql: + enabled: false + + # Use existing Redis instance + redis: + enabled: false + + # External database configuration + externalDatabase: + host: postgres-postgresql.db-postgres.svc.cluster.local + port: 5432 + database: netbox + username: netbox + existingSecretName: netbox-db-credentials + existingSecretKey: password + + # External Redis configuration + externalRedis: + host: redis-master.db-redis.svc.cluster.local + port: 6379 + existingSecretName: netbox-redis-credentials + existingSecretKey: password + + service: + type: ClusterIP + annotations: + tailscale.com/expose: "true" + tailscale.com/hostname: "netbox" + tailscale.com/tags: "tag:k8s,tag:infra-monitoring" + + ingress: + enabled: true + className: "" + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/name: "NetBox" + gethomepage.dev/description: "Infrastructure Resource Modeling" + gethomepage.dev/group: "Infrastructure" + gethomepage.dev/icon: "netbox.png" + hosts: + - host: netbox + paths: + - path: / + pathType: Prefix + + destination: + server: https://kubernetes.default.svc + namespace: infra-netbox + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/argocd/apps/platform/velero.yml b/argocd/apps/platform/velero.yml new file mode 100644 index 00000000..7bb41d6c --- /dev/null +++ b/argocd/apps/platform/velero.yml @@ -0,0 +1,58 @@ +--- +# NOTE: Velero requires backup storage configuration. +# The backup storage location and volume snapshot location must be +# configured based on your environment (e.g., S3, Azure, GCS, etc.). +# TODO: Configure backupStorageLocation and volumeSnapshotLocation. 
+apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: velero + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "20" +spec: + project: coachlight-k3s-infra + source: + repoURL: https://vmware-tanzu.github.io/helm-charts + chart: velero + targetRevision: "8.7.0" + helm: + valuesObject: + initContainers: + - name: velero-plugin-for-csi + image: velero/velero-plugin-for-csi:v0.7.1 + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /target + name: plugins + + configuration: + features: EnableCSI + # TODO: Configure backup storage and snapshot locations below + # Example for AWS S3: + # backupStorageLocation: + # - name: default + # provider: aws + # bucket: + # config: + # region: + # volumeSnapshotLocation: + # - name: default + # provider: aws + # config: + # region: + + snapshotsEnabled: true + deployNodeAgent: false + upgradeCRDs: true + cleanUpCRDs: false + + destination: + server: https://kubernetes.default.svc + namespace: infra-velero + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/docs/argo_vs_ansible_deploy_audit.md b/docs/argo_vs_ansible_deploy_audit.md new file mode 100644 index 00000000..97e27769 --- /dev/null +++ b/docs/argo_vs_ansible_deploy_audit.md @@ -0,0 +1,414 @@ +# ArgoCD vs Ansible Deploy Roles Audit + +This document audits all `*_deploy` roles in `ansible/roles/` against ArgoCD Application manifests in `argocd/` to determine which roles are redundant and can be deleted, which need corresponding Argo apps created, and which should be kept. + +**Audit Date:** 2025-12-31 +**Scope:** All roles ending in `_deploy`, excluding `argocd_deploy` and `onepassword_operator_deploy` (bootstrap roles) + +--- + +## Summary + +| Status | Count | Roles | +|--------|-------|-------| +| **DELETED** | 2 | `crafty_controller_deploy`, `nfs_provisioner_deploy` | +| **ARGO APP CREATED** | 3 | `frigate_deploy`, `netbox_deploy`, `velero_deploy` | +| **KEEP ROLE** | 4 | `homepage_deploy`, `omada_deploy`, `paperless_ngx_deploy`, `tailscale_operator_deploy` | +| **EVALUATE FOR DELETION** | 1 | `frigate_deploy` (can likely delete now that Argo app exists) | +| **OUT OF SCOPE** | 1 | `synology_csi_deploy` (infrastructure, not app deployment) | + +--- + +## Detailed Analysis + +### crafty_controller_deploy + +**Role Path:** `ansible/roles/crafty_controller_deploy` + +**Corresponding Argo Manifest(s):** +- `argocd/apps/apps/crafty-controller.yml` ✅ + +**What the role does:** +- Templates an ArgoCD Application manifest from `templates/application.yml.j2` +- Applies the manifest using `k8s_object_manager` role +- Waits for Application to be Synced and not Degraded +- Persists artifacts via `role_artifacts` + +**Redundant?** **YES** + +**Action:** **DELETE ROLE** + +**Notes:** +- The ArgoCD manifest already exists at `argocd/apps/apps/crafty-controller.yml` and is functionally identical +- Role only templates variables that are static (namespace, project, repo URL, path) - all of which are already hardcoded in the existing Argo manifest +- No secrets management, no external resource creation +- No playbook references found + +--- + +### homepage_deploy + +**Role Path:** `ansible/roles/homepage_deploy` + +**Corresponding Argo Manifest(s):** +- `argocd/apps/platform/homepage.yml` ✅ + +**What the role does:** +- Templates an ArgoCD Application manifest with sensitive values (NextDNS token, Proxmox password) +- Creates a Secret with ArgoCD homepage token (`homepage-argocd-secret.yml.j2`) +- Applies 
optional ingress templates from a configurable directory
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** **NO**
+
+**Action:** **KEEP ROLE**
+
+**Notes:**
+- Role templates sensitive credentials that cannot be in Git:
+  - `homepage_nextdns_api_token`
+  - `proxmox_api_password`
+  - `argocd_homepage_token`
+- The existing Argo manifest has placeholder empty strings for these secrets with TODO comments
+- The role also handles optional ingress templates dynamically
+- **Recommendation:** Eventually migrate secrets to OnePassword CRDs (as noted in the Argo manifest TODOs), then this role could be deleted
+
+---
+
+### nfs_provisioner_deploy
+
+**Role Path:** `ansible/roles/nfs_provisioner_deploy`
+
+**Corresponding Argo Manifest(s):**
+- `argocd/apps/platform/nfs-provisioner.yml` ✅
+
+**What the role does:**
+- Validates that ArgoCD Application manifest exists at a static path
+- Validates that StorageClass manifests exist at static paths
+- Applies the ArgoCD Application manifest
+- Waits for Application to become Healthy and Synced
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** **YES**
+
+**Action:** **DELETE ROLE**
+
+**Notes:**
+- The ArgoCD manifest already exists and is static
+- Role only validates file existence and applies manifest - no templating, no secrets, no resource creation
+- The StorageClass manifests it validates are separate from this role and already exist in `k8s/storageclasses/`
+- No playbook references found
+
+---
+
+### omada_deploy
+
+**Role Path:** `ansible/roles/omada_deploy`
+
+**Corresponding Argo Manifest(s):**
+- `argocd/apps/platform/omada-controller.yml` ✅
+
+**What the role does:**
+- Templates an ArgoCD Application manifest with configurable storage classes, service type, and Tailscale annotations
+- Discovers and applies optional ingress templates from a configurable directory
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** **NO**
+
+**Action:** **KEEP ROLE**
+
+**Notes:**
+- While the Argo manifest exists, the role provides significant flexibility:
+  - Configurable storage classes for data and logs
+  - Configurable service type
+  - Configurable Tailscale annotations and hostname
+  - Dynamic ingress template discovery and application
+  - Support for values overrides
+- The static Argo manifest has hardcoded values that may not suit all deployments
+- The role appears to be used for deployment flexibility across different environments
+- **Recommendation:** Consider whether the static Argo manifest should be the source of truth, or if this flexibility is needed
+
+---
+
+### paperless_ngx_deploy
+
+**Role Path:** `ansible/roles/paperless_ngx_deploy`
+
+**Corresponding Argo Manifest(s):**
+- `argocd/apps/apps/paperless-ngx.yml` ✅
+- `argocd/apps/apps/paperless-ngx-secrets.yml` ✅
+
+**What the role does:**
+- Templates two ArgoCD Application manifests: one for secrets, one for main app
+- Applies secrets Application first, then main Application
+- Waits for both Applications to be synced and healthy
+- Waits specifically for the webserver Deployment to have available replicas
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** **NO**
+
+**Action:** **KEEP ROLE**
+
+**Notes:**
+- Both Argo manifests exist in GitOps
+- However, the role orchestrates a specific deployment sequence:
+  1. Secrets app must deploy and be Healthy first
+  2. Then main app deploys
+  3. Then waits for specific Deployment readiness
+- This orchestration logic is important for ensuring secrets are available before the app starts
+- The role provides deployment sequencing that ArgoCD's sync-waves might not fully replace (especially the Deployment-level wait)
+- **Recommendation:** Evaluate if ArgoCD sync-waves and health checks are sufficient, or if this orchestration is still needed
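+
+If this sequencing does move to GitOps, the repo's existing sync-wave convention can express the "secrets before app" step. The sketch below is illustrative only: it assumes the two paperless-ngx Applications are reconciled by a parent app-of-apps (as the sync-wave annotations elsewhere in `argocd/apps/` suggest), and the wave numbers are placeholders. It does not replicate the role's Deployment-level readiness wait.
+
+```yaml
+# Hedged sketch: order the existing paperless-ngx Applications with sync waves.
+# Wave values are assumptions; only the annotation key is standard ArgoCD.
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: paperless-ngx-secrets
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "10"   # synced first
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: paperless-ngx
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "20"   # synced after the secrets Application
+```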
+
+---
+
+### tailscale_operator_deploy
+
+**Role Path:** `ansible/roles/tailscale_operator_deploy`
+
+**Corresponding Argo Manifest(s):**
+- `argocd/apps/operators/tailscale-operator.yml` ✅
+- `argocd/apps/operators/tailscale-operator-secrets.yml` ✅
+
+**What the role does:**
+- Templates ArgoCD Application manifest with OAuth client credentials
+- Applies manifest using `k8s_object_manager`
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** **NO**
+
+**Action:** **KEEP ROLE**
+
+**Notes:**
+- Role templates sensitive OAuth credentials that cannot be in Git:
+  - `tailscale_operator_deploy_oauth_client_id`
+  - `tailscale_operator_deploy_oauth_client_secret`
+- The existing Argo manifest uses a different approach: it references a Secret (`tailscale-operator-oauth`) that must exist
+- The secrets Application (`tailscale-operator-secrets.yml`) points to `k8s/tailscale_operator/onepassword/` which likely contains OnePasswordItem CRDs
+- **Current state:** The role templates credentials into the Application manifest, while the GitOps approach uses OnePassword CRDs
+- **Recommendation:** Once OnePassword secrets are fully working, this role can be deleted in favor of the GitOps manifests
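+
+For reference, the OnePasswordItem pattern that `tailscale-operator-secrets.yml` most likely relies on mirrors the NetBox items added in this PR. The sketch below is an assumption about what `k8s/tailscale_operator/onepassword/` contains; the namespace, vault, and item names are placeholders, not verified contents of that directory.
+
+```yaml
+# Hypothetical OnePasswordItem producing the `tailscale-operator-oauth` Secret
+# referenced by the operator Application. Namespace and itemPath are assumptions.
+apiVersion: onepassword.com/v1
+kind: OnePasswordItem
+metadata:
+  name: tailscale-operator-oauth
+  namespace: tailscale
+spec:
+  itemPath: "vaults/HomeLab/items/Tailscale Operator OAuth"
+```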
+
+---
+
+### frigate_deploy
+
+**Role Path:** `ansible/roles/frigate_deploy`
+
+**Corresponding Argo Manifest(s):**
+- **MISSING** ❌
+
+**What the role does:**
+- Templates a Frigate ArgoCD Application manifest with storage classes, Tailscale configuration, and ingress settings
+- Templates a separate Homepage ingress manifest
+- Applies both manifests using `k8s_object_manager`
+- Supports configurable Helm chart values and overrides
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** N/A (no Argo app exists)
+
+**Action:** **CREATE ARGO APP**
+
+**Notes:**
+- No ArgoCD Application manifest exists in `argocd/apps/` for Frigate
+- The role templates significant configuration:
+  - Helm chart repo: configurable
+  - Storage classes for Longhorn (config) and Synology (media)
+  - Tailscale service annotations and ingress
+  - Dynamic values overrides
+- **Work completed:**
+  1. ✅ Created `argocd/apps/apps/frigate.yml` with default configuration
+  2. ✅ Hardcoded storage values in the manifest (`nfs-synology-retain` for both config and media) along with the Tailscale annotations
+  3. ✅ Included Homepage ingress annotations in the Argo manifest
+- **Remaining evaluation:**
+  - The `frigate_deploy` role can likely be deleted now that the Argo app exists
+  - The role's flexibility (configurable storage, values overrides) is not needed if the standard config is sufficient
+  - **Recommendation:** Evaluate if custom storage or values are needed; if not, delete the role
+
+---
+
+### netbox_deploy
+
+**Role Path:** `ansible/roles/netbox_deploy`
+
+**Corresponding Argo Manifest(s):**
+- **MISSING** ❌
+
+**What the role does:**
+- Templates a NetBox ArgoCD Application manifest with sensitive credentials (superuser password, secret key)
+- Templates a Homepage ingress manifest
+- Applies both manifests using `k8s_object_manager`
+- Cleans up temporary directory after deployment
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** N/A (no Argo app exists)
+
+**Action:** **CREATE ARGO APP**
+
+**Notes:**
+- No ArgoCD Application manifest exists in `argocd/apps/` for NetBox
+- The role templates **sensitive credentials** that cannot be in Git:
+  - `netbox_deploy_superuser_password`
+  - `netbox_deploy_secret_key`
+- **Work completed:**
+  1. ✅ Created `argocd/apps/platform/netbox.yml` (main app skeleton)
+  2. ✅ Created `argocd/apps/platform/netbox-secrets.yml` (placeholder)
+  3. ✅ Created the OnePasswordItem manifests and kustomization under `k8s/netbox/`
+- **Remaining work:**
+  - The NetBox Helm chart doesn't natively support secretKeyRef for credentials
+  - Options:
+    1. Keep `netbox_deploy` role to template secrets into the Helm values
+    2. Investigate if the NetBox chart supports an `existingSecret` parameter (see the hypothetical sketch below)
+    3. Create a kustomize overlay that patches the Helm release with secrets
+  - The Homepage ingress annotations are included in the Argo manifest
+  - **Current recommendation:** Keep the `netbox_deploy` role until a proper secrets integration is implemented
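+
+If option 2 pans out, the wiring in `argocd/apps/platform/netbox.yml` might look roughly like the fragment below. Every key here is a hypothetical placeholder — the chart's `values.yaml` must be checked before assuming these parameters exist. The Secret name comes from the `netbox-credentials` OnePasswordItem added under `k8s/netbox/` in this PR.
+
+```yaml
+# Hypothetical Helm values fragment — NOT verified against the NetBox chart.
+# It only illustrates what an existing-secret hookup could look like.
+helm:
+  valuesObject:
+    superuser:
+      email: "admin@netbox.local"
+      existingSecret: netbox-credentials      # hypothetical chart parameter
+      existingSecretPasswordKey: password     # hypothetical key name
+    existingSecret: netbox-credentials        # hypothetical source for the Django secretKey
+```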
+
+---
+
+### velero_deploy
+
+**Role Path:** `ansible/roles/velero_deploy`
+
+**Corresponding Argo Manifest(s):**
+- **MISSING** ❌
+
+**What the role does:**
+- Templates a Velero ArgoCD Application manifest with:
+  - CSI plugin configuration
+  - Backup storage location (provider, bucket, config)
+  - Volume snapshot location (provider, config)
+- Applies manifest using `k8s_object_manager`
+- Cleans up temporary directory after deployment
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** N/A (no Argo app exists)
+
+**Action:** **CREATE ARGO APP**
+
+**Notes:**
+- No ArgoCD Application manifest exists in `argocd/apps/` for Velero
+- The role templates complex configuration:
+  - CSI plugin version
+  - Backup storage provider configuration (potentially sensitive)
+  - Snapshot location configuration
+- **Work completed:**
+  1. ✅ Created `argocd/apps/platform/velero.yml` with base configuration
+  2. ✅ Included CSI plugin init container
+  3. ✅ Left backup storage location as TODO (requires environment-specific config)
+- **Remaining work:**
+  - Storage provider credentials are likely sensitive (S3 keys, cloud credentials)
+  - Options:
+    1. Keep `velero_deploy` role to template backup storage configuration
+    2. Create environment-specific overlays or separate secrets manifest
+    3. Use cloud provider workload identity (no secrets needed)
+  - **Recommendation:** Keep the `velero_deploy` role until backup storage is configured, or use it for environment-specific deployments
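+
+To make the open TODO in `argocd/apps/platform/velero.yml` more concrete, a filled-in backup storage location for an S3-compatible backend could look like the sketch below. The bucket, region, endpoint, and credentials Secret name are placeholders for this environment, and the `credentials.existingSecret` value assumes that Secret (e.g. created via a OnePasswordItem) already holds a Velero-format `cloud` credentials file.
+
+```yaml
+# Illustrative values fragment for the Velero chart — environment-specific
+# names below are assumptions, not decided configuration.
+helm:
+  valuesObject:
+    credentials:
+      useSecret: true
+      existingSecret: velero-s3-credentials    # hypothetical Secret with a `cloud` key
+    configuration:
+      backupStorageLocation:
+        - name: default
+          provider: aws                        # the aws provider also covers S3-compatible storage
+          bucket: k3s-velero-backups
+          config:
+            region: us-east-1
+            s3Url: https://s3.example.internal # only needed for non-AWS endpoints
+            s3ForcePathStyle: "true"
+```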
+
+---
+
+### synology_csi_deploy
+
+**Role Path:** `ansible/roles/synology_csi_deploy`
+
+**Corresponding Argo Manifest(s):**
+- **MISSING** ❌ (no match in `argocd/apps/`)
+
+**What the role does:**
+- Templates a Synology CSI ArgoCD Application manifest with:
+  - NAS host IP from inventory (`groups['synology_nas']`)
+  - NAS credentials (`k3s_synology_csi_nas_username`, `k3s_synology_csi_nas_password`)
+  - Storage class configuration
+- Applies manifest using `k8s_object_manager`
+- Cleans up temporary directory
+- Persists artifacts via `role_artifacts`
+
+**Redundant?** N/A (no Argo app exists)
+
+**Action:** **OUT OF SCOPE**
+
+**Notes:**
+- This role does NOT follow the naming pattern of other deploy roles (it's infrastructure/storage, not an "app")
+- It templates **sensitive credentials** (NAS username/password)
+- It dynamically queries inventory for the NAS IP address
+- **This is infrastructure provisioning, not app deployment** - it's more similar to `argocd_deploy` and `onepassword_operator_deploy` in nature
+- **Recommendation:** Leave this role as-is. It's an infrastructure bootstrap role, not an "app deploy" role. If an Argo app is desired:
+  1. Create `argocd/apps/platform/synology-csi.yml` and `argocd/apps/platform/synology-csi-secrets.yml`
+  2. Create OnePasswordItem CRDs for NAS credentials
+  3. Role would still be needed to handle inventory lookups unless those are also moved to OnePassword or ConfigMaps
+
+---
+
+## Actions Required
+
+### 1. Delete Redundant Roles ✅ COMPLETED
+
+- [x] Delete `ansible/roles/crafty_controller_deploy/`
+- [x] Delete `ansible/roles/nfs_provisioner_deploy/`
+- [x] Remove unused variable from `ansible/site.yml`
+
+### 2. Create Missing Argo Applications ✅ COMPLETED
+
+#### Frigate ✅
+- [x] Create `argocd/apps/apps/frigate.yml` with:
+  - Chart: blakeblackshear/frigate
+  - Storage configuration using the `nfs-synology-retain` StorageClass (config and media volumes)
+  - Tailscale service and ingress configuration
+  - Homepage ingress annotations
+- [x] No secrets needed for basic deployment
+- **Next step:** Evaluate if `frigate_deploy` role can be deleted (likely yes)
+
+#### NetBox ✅ (Partial - scaffolding created)
+- [x] Create `argocd/apps/platform/netbox.yml` with base configuration
+- [x] Create `argocd/apps/platform/netbox-secrets.yml` (placeholder)
+- [x] Create the OnePasswordItem manifests and kustomization under `k8s/netbox/`
+- [ ] **Remaining:** Implement proper secrets integration (chart may not support secretKeyRef)
+- [ ] **Remaining:** Test deployment and secrets flow
+- **Current state:** Role should be kept until secrets integration is resolved
+
+#### Velero ✅ (Partial - base created)
+- [x] Create `argocd/apps/platform/velero.yml` with CSI plugin
+- [x] Document backup storage configuration as TODO
+- [ ] **Remaining:** Configure backup storage location and credentials
+- **Current state:** Role should be kept for environment-specific backup storage config
+
+### 3. Future Migrations (Keep Roles for Now)
+
+These roles should be kept until secrets are migrated to OnePassword CRDs:
+
+#### Homepage
+- [ ] Migrate NextDNS API token to OnePasswordItem CRD
+- [ ] Migrate Proxmox API password to OnePasswordItem CRD
+- [ ] Migrate ArgoCD homepage token to OnePasswordItem CRD
+- [ ] Update `argocd/apps/platform/homepage.yml` to reference secrets
+- [ ] Then delete `homepage_deploy` role
+
+#### Tailscale Operator
+- [ ] Verify OnePassword secrets in `k8s/tailscale_operator/onepassword/` are working
+- [ ] Verify `argocd/apps/operators/tailscale-operator.yml` correctly references secret
+- [ ] Then delete `tailscale_operator_deploy` role
+
+#### Paperless NGX
+- [ ] Verify the deployment orchestration (secrets → main app → wait for deployment) can be fully replaced by ArgoCD sync-waves and health checks
+- [ ] Then delete `paperless_ngx_deploy` role
+
+#### Omada Controller
+- [ ] Decide if deployment flexibility (storage classes, service types, dynamic ingress) is truly needed
+- [ ] If not needed, hardcode values in `argocd/apps/platform/omada-controller.yml`
+- [ ] Then delete `omada_deploy` role
+
+---
+
+## References
+
+- **ArgoCD Apps:** `argocd/apps/{apps,operators,platform}/*.yml`
+- **ArgoCD Projects:** `argocd/projects/*.yml`
+- **Kubernetes Manifests:** `k8s/*/`
+- **OnePassword Secrets Pattern:** `k8s/*/onepassword/*.yml` (OnePasswordItem CRDs)
+- **Role Artifacts Pattern:** All deploy roles call `role_artifacts` to persist outputs
+
+---
+
+## Lessons Learned / Patterns
+
+1. **Secrets Management:** Roles that template sensitive credentials must be kept until OnePassword CRDs are in place
+2. **Static vs Dynamic Config:** Roles that only template static configuration (namespace, repo, path) are redundant
+3. **Deployment Orchestration:** Roles that provide specific deployment sequencing (e.g., secrets before app) may still be valuable
+4. **Ingress Flexibility:** Several roles support dynamic ingress templating from configurable directories - this is a pattern to consider preserving or replacing
+5.
**Infrastructure vs Apps:** Infrastructure/bootstrap roles (CSI, operators) are different from app deploy roles and should be evaluated separately + diff --git a/k8s/netbox/kustomization.yml b/k8s/netbox/kustomization.yml new file mode 100644 index 00000000..7ac5d3a6 --- /dev/null +++ b/k8s/netbox/kustomization.yml @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yml + - netbox-credentials.yml + - netbox-db-credentials.yml + - netbox-redis-credentials.yml diff --git a/k8s/netbox/namespace.yml b/k8s/netbox/namespace.yml new file mode 100644 index 00000000..fff01b0c --- /dev/null +++ b/k8s/netbox/namespace.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: infra-netbox diff --git a/k8s/netbox/netbox-credentials.yml b/k8s/netbox/netbox-credentials.yml new file mode 100644 index 00000000..4ad21c4b --- /dev/null +++ b/k8s/netbox/netbox-credentials.yml @@ -0,0 +1,8 @@ +--- +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: netbox-credentials + namespace: infra-netbox +spec: + itemPath: "vaults/HomeLab/items/NetBox" diff --git a/k8s/netbox/netbox-db-credentials.yml b/k8s/netbox/netbox-db-credentials.yml new file mode 100644 index 00000000..0491f87f --- /dev/null +++ b/k8s/netbox/netbox-db-credentials.yml @@ -0,0 +1,8 @@ +--- +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: netbox-db-credentials + namespace: infra-netbox +spec: + itemPath: "vaults/HomeLab/items/NetBox Database" diff --git a/k8s/netbox/netbox-redis-credentials.yml b/k8s/netbox/netbox-redis-credentials.yml new file mode 100644 index 00000000..7cf08e8d --- /dev/null +++ b/k8s/netbox/netbox-redis-credentials.yml @@ -0,0 +1,8 @@ +--- +apiVersion: onepassword.com/v1 +kind: OnePasswordItem +metadata: + name: netbox-redis-credentials + namespace: infra-netbox +spec: + itemPath: "vaults/HomeLab/items/NetBox Redis"