diff --git a/.circleci/ansible/configure-server.yml b/.circleci/ansible/configure-server.yml index 55f2099..4e1f9ff 100644 --- a/.circleci/ansible/configure-server.yml +++ b/.circleci/ansible/configure-server.yml @@ -1,29 +1,38 @@ --- - -- name: "configuration play." +- name: 'configuration play.' hosts: web user: ubuntu become: true become_method: sudo - become_user: root + become_user: root gather_facts: false vars: - ansible_python_interpreter: /usr/bin/python3 - ansible_host_key_checking: false - ansible_stdout_callback: yaml + - ansible_python_interpreter: /usr/bin/python3 + - ansible_host_key_checking: false + - ansible_stdout_callback: yaml pre_tasks: - - name: "wait 600 seconds for target connection to become reachable/usable." + - name: 'wait 600 seconds for target connection to become reachable/usable.' wait_for_connection: - timeout: 600 - - name: "install python for Ansible." - become: true - raw: test -e /usr/bin/python3 || (apt -y update && apt install -y python3) - changed_when: false + - name: 'install python for Ansible.' + apt: + name: python3 + state: latest + update_cache: yes - - setup: + environment: + - NODE_ENV: production + - ENVIRONMENT: production + - TYPEORM_CONNECTION: "{{ lookup('env', 'TYPEORM_CONNECTION')}}" + - TYPEORM_ENTITIES: "{{ lookup('env', 'TYPEORM_ENTITIES')}}" + - TYPEORM_HOST: "{{ lookup('env', 'TYPEORM_HOST')}}" + - TYPEORM_PORT: "{{ lookup('env', 'TYPEORM_PORT')}}" + - TYPEORM_USERNAME: "{{ lookup('env', 'TYPEORM_USERNAME')}}" + - TYPEORM_PASSWORD: "{{ lookup('env', 'TYPEORM_PASSWORD')}}" + - TYPEORM_DATABASE: "{{ lookup('env', 'TYPEORM_DATABASE')}}" + - TYPEORM_MIGRATIONS: "{{ lookup('env', 'TYPEORM_MIGRATIONS')}}" + - TYPEORM_MIGRATIONS_DIR: "{{ lookup('env', 'TYPEORM_MIGRATIONS_DIR')}}" roles: - configure-server - - configure-prometheus-node-exporter \ No newline at end of file diff --git a/.circleci/ansible/deploy-backend.yml b/.circleci/ansible/deploy-backend.yml index 1fe61dc..be32bdb 100644 --- a/.circleci/ansible/deploy-backend.yml +++ b/.circleci/ansible/deploy-backend.yml @@ -1,37 +1,23 @@ --- -- name: "deployment play." +- name: 'configuration play.' hosts: web - remote_user: ubuntu + user: ubuntu gather_facts: false vars: - ansible_python_interpreter: /usr/bin/python3 - ansible_host_key_checking: false - ansible_stdout_callback: yaml - + - ansible_python_interpreter: /usr/bin/python3 + - ansible_host_key_checking: false + - ansible_stdout_callback: yaml environment: - NODE_ENV: production - VERSION: "1" - TYPEORM_CONNECTION: "{{ lookup('env', 'TYPEORM_CONNECTION') }}" - TYPEORM_MIGRATIONS_DIR: "{{ lookup('env', 'TYPEORM_MIGRATIONS_DIR') }}" - TYPEORM_ENTITIES: "{{ lookup('env', 'TYPEORM_ENTITIES') }}" - TYPEORM_MIGRATIONS: "{{ lookup('env', 'TYPEORM_MIGRATIONS') }}" - TYPEORM_HOST: "{{ lookup('env', 'TYPEORM_HOST') }}" - TYPEORM_PORT: "{{ lookup('env', 'TYPEORM_PORT') }}" - TYPEORM_USERNAME: "{{ lookup('env', 'TYPEORM_USERNAME') }}" - TYPEORM_PASSWORD: "{{ lookup('env', 'TYPEORM_PASSWORD') }}" - TYPEORM_DATABASE: "{{ lookup('env', 'TYPEORM_DATABASE') }}" - - pre_tasks: - - name: "wait 600 seconds for target connection to become reachable/usable." - wait_for_connection: - timeout: 600 - - - name: "install python for Ansible." 
- become: true - raw: test -e /usr/bin/python3 || (apt -y update && apt install -y python3) - changed_when: false - - - setup: - + - NODE_ENV: production + - ENVIRONMENT: production + - TYPEORM_CONNECTION: "{{ lookup('env', 'TYPEORM_CONNECTION')}}" + - TYPEORM_ENTITIES: "{{ lookup('env', 'TYPEORM_ENTITIES')}}" + - TYPEORM_HOST: "{{ lookup('env', 'TYPEORM_HOST')}}" + - TYPEORM_PORT: "{{ lookup('env', 'TYPEORM_PORT')}}" + - TYPEORM_USERNAME: "{{ lookup('env', 'TYPEORM_USERNAME')}}" + - TYPEORM_PASSWORD: "{{ lookup('env', 'TYPEORM_PASSWORD')}}" + - TYPEORM_DATABASE: "{{ lookup('env', 'TYPEORM_DATABASE')}}" + - TYPEORM_MIGRATIONS: "{{ lookup('env', 'TYPEORM_MIGRATIONS')}}" + - TYPEORM_MIGRATIONS_DIR: "{{ lookup('env', 'TYPEORM_MIGRATIONS_DIR')}}" roles: - - deploy \ No newline at end of file + - deploy diff --git a/.circleci/ansible/node-exporter.yml b/.circleci/ansible/node-exporter.yml index a5f37e4..1f41db5 100644 --- a/.circleci/ansible/node-exporter.yml +++ b/.circleci/ansible/node-exporter.yml @@ -3,10 +3,12 @@ hosts: web user: ubuntu become: true + become_method: sudo + become_user: root gather_facts: false vars: - ansible_python_interpreter: /usr/bin/python3 - ansible_host_key_checking: false - ansible_stdout_callback: yaml roles: - - configure-prometheus-node-exporter \ No newline at end of file + - configure-prometheus-node-exporter diff --git a/.circleci/ansible/roles/configure-prometheus-node-exporter/tasks/main.yml b/.circleci/ansible/roles/configure-prometheus-node-exporter/tasks/main.yml index 81a5473..ec8ab2e 100644 --- a/.circleci/ansible/roles/configure-prometheus-node-exporter/tasks/main.yml +++ b/.circleci/ansible/roles/configure-prometheus-node-exporter/tasks/main.yml @@ -1,13 +1,13 @@ - name: "install node exporter." unarchive: - src: https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz + src: https://github.com/prometheus/node_exporter/releases/download/v0.17.0/node_exporter-0.17.0.linux-amd64.tar.gz dest: /tmp remote_src: yes - name: "move binary to /usr/local/bin." become: true copy: - src: /tmp/node_exporter-1.3.1.linux-amd64/node_exporter + src: /tmp/node_exporter-0.17.0.linux-amd64/node_exporter dest: /usr/local/bin/node_exporter remote_src: yes mode: '0777' diff --git a/.circleci/ansible/roles/configure-server/tasks/main.yml b/.circleci/ansible/roles/configure-server/tasks/main.yml index 6c8b623..98078dd 100644 --- a/.circleci/ansible/roles/configure-server/tasks/main.yml +++ b/.circleci/ansible/roles/configure-server/tasks/main.yml @@ -1,16 +1,24 @@ --- -- name: "Install Node.js 13" - become: true - shell: | - # Install Node.js LTS version as our base Node.js version - curl -fsSL https://deb.nodesource.com/setup_lts.x | sudo -E bash - - sudo apt install -y nodejs - - # Use n version manager to use Node.js v13.8.0 - sudo npm install --global n - sudo n 13.8.0 - -- name: "Install PM2 Globally" +- name: 'update and upgrade packages.' 
# sudo apt upgrade + become: yes + apt: + upgrade: yes + update_cache: yes + cache_valid_time: 86400 +- name: remove unneeded deps # sudo apt autoremove + become: yes + apt: + autoremove: yes +- name: install deps # sudo apt install nodejs npm + become: yes + apt: + name: ['nodejs', 'npm'] + state: latest + update_cache: yes +- name: install pm2 # sudo npm install pm2 -g + become: yes npm: name: pm2 - global: yes \ No newline at end of file + global: yes + state: latest + production: yes diff --git a/.circleci/ansible/roles/deploy/tasks/main.yml b/.circleci/ansible/roles/deploy/tasks/main.yml index de082da..21b9428 100644 --- a/.circleci/ansible/roles/deploy/tasks/main.yml +++ b/.circleci/ansible/roles/deploy/tasks/main.yml @@ -1,32 +1,35 @@ --- -- name: "Creates backend app directory" - file: - path: ~/backend-app - state: directory - -- name: "Unarchive backend files" +- name: 'update and upgrade packages.' + become: yes + apt: + upgrade: yes + update_cache: yes + cache_valid_time: 86400 +- name: remove unneeded deps + become: yes + apt: + autoremove: yes +- name: install deps + become: yes + apt: + name: ['nodejs', 'npm'] + state: latest + update_cache: yes +- name: install pm2 + become: yes + npm: + name: pm2 + global: yes + state: latest + production: yes +- name: extract artifact + become: yes unarchive: - src: artifact.tar.gz - dest: ~/backend-app - -- name: "Installing Node Dependencies" - shell: | - cd ~/backend-app - npm i - -- name: "Executing Node app with PM2" - shell: | - cd ~/backend-app/dist - pm2 stop default - pm2 start main.js - - register: execute_node - -- name: print message - debug: - msg: "{{ execute_node.stdout_lines }}" + src: files/artifact.tar.gz + dest: . -- name: "Configure pm2 to start as service" +- name: start app + become: yes shell: | - sudo su -c "env PATH=$PATH:/usr/local/bin pm2 startup systemd -u ubuntu --hp /home/ubuntu" - pm2 save \ No newline at end of file + pm2 delete all + pm2 start npm -- start diff --git a/.circleci/config copy.yml b/.circleci/config copy.yml new file mode 100644 index 0000000..17882fa --- /dev/null +++ b/.circleci/config copy.yml @@ -0,0 +1,297 @@ +version: 2.1 + +commands: + destroy-environment: + description: Destroy back-end and front-end cloudformation stacks given a workflow ID. + parameters: + # Add parameter here + steps: + - run: + name: Destroy environments + when: on_fail + command: | + # Your code here + exit 1 + + revert-migrations: + description: Revert the last migration if successfully run in the current workflow. + parameters: + # Add parameter here + steps: + - run: + name: Revert migrations + # Add when this will run + command: | + # Curl command here to see if there was a successful migration associated with the workflow id, store result in SUCCESS variable + SUCCESS = 1 + if(( $SUCCESS==1 )); + then + # cd ~/project/backend + # npm install + # Add revert code here. You can find this in the Getting Started section. 
+ exit 1 + fi + +jobs: + build-frontend: + docker: + - image: circleci/node:13.8.0 + steps: + - checkout + - restore_cache: + keys: [frontend-build] + - run: + name: Build front-end + command: | + # Your code here + exit 1 + - save_cache: + paths: [frontend/node_modules] + key: frontend-build + + build-backend: + docker: + - image: circleci/node:13.8.0 + steps: + - checkout + - restore_cache: + keys: [backend-build] + - run: + name: Back-end build + command: | + # Your code here + exit 1 + - save_cache: + paths: [backend/node_modules] + key: backend-build + + test-frontend: + docker: + # Docker image here + steps: + # Checkout code from git + # Restore from cache + # Your job code here + + test-backend: + docker: + # Docker image here + steps: + # Checkout code from git + # Restore from cache + # Your job code here + + scan-frontend: + docker: + # Docker image here + steps: + # Checkout code from git + # Restore from cache + # Your job code here + + scan-backend: + docker: + # Docker image here + steps: + # Checkout code from git + # Restore from cache + # Your job code here + + deploy-infrastructure: + docker: + # Docker image here that supports AWS CLI + steps: + # Checkout code from git + - run: + name: Ensure back-end infrastructure exists + command: | + aws cloudformation deploy \ + --template-file .circleci/files/backend.yml \ + #--tags project=your-tag \ + # --stack-name "your back-end stack name with workflow id" \ + # --parameter-overrides ID="your workflow id" + exit 1 + - run: + name: Ensure front-end infrastructure exist + command: | + aws cloudformation deploy \ + --template-file .circleci/files/frontend.yml \ + #--tags project=your-tag \ + # --stack-name "your front-end stack name with workflow id" \ + # --parameter-overrides ID="your workflow id" + exit 1 + - run: + name: Add back-end ip to ansible inventory + command: | + # Your code here + exit 1 + - persist_to_workspace: + root: ~/ + paths: + - project/.circleci/ansible/inventory.txt + # Here's where you will add some code to rollback on failure + + configure-infrastructure: + docker: + # Docker image here that supports Ansible + steps: + # Checkout code from git + # Add ssh keys with fingerprint + # attach workspace + - run: + name: Install dependencies + command: | + # Your code here + exit 1 + - run: + name: Configure server + command: | + # Your code here + exit 1 + # Here's where you will add some code to rollback on failure + + run-migrations: + docker: + # Docker image here that supports NodeJS + steps: + # Checkout code from git + - run: + name: Run migrations + command: | + # Your code here + exit 1 + - run: + name: Send migration results to memstash + command: | + # Your code here + exit 1 + # Here's where you will add some code to rollback on failure + + deploy-frontend: + docker: + # Docker image here that supports AWS CLI + steps: + # Checkout code from git + - run: + name: Install dependencies + command: | + # your code here + - run: + name: Get backend url + command: | + # your code here + export API_URL="http://${BACKEND_IP}:3030" + echo "${API_URL}" + - run: + name: Deploy frontend objects + command: | + # your code here + # Here's where you will add some code to rollback on failure + + deploy-backend: + docker: + # Docker image here that supports Ansible + steps: + # Checkout code from git + # Add ssh keys with fingerprint + # attach workspace + - run: + name: Install dependencies + command: | + # your code here + - run: + name: Deploy backend + command: | + # your code here + # Here's where you will add 
some code to rollback on failure + + smoke-test: + docker: + # Lightweight Docker image + steps: + # Checkout code from git + - run: + name: Install dependencies + command: | + # your code here + - run: + name: Get backend url + command: | + # your code here + - run: + name: Backend smoke test. + command: | + # your code here + - run: + name: Frontend smoke test. + command: | + # your code here + # Here's where you will add some code to rollback on failure + + cloudfront-update: + docker: + # Docker image here that supports AWS CLI + steps: + # Checkout code from git + - run: + name: Install dependencies + command: | + # your code here + - run: + name: Update cloudfront distribution + command: | + # your code here + # Here's where you will add some code to rollback on failure + +cleanup: + docker: + # Docker image here + steps: + # Checkout code from git + - run: + name: Get old stack workflow id + command: | + # your code here + export OldWorkflowID="the id here" + export STACKS=[] #put the list of stacks here + - run: + name: Remove old stacks and files + command: | + if [[ "${STACKS[@]}" =~ "${OldWorkflowID}" ]] + then + # your code here + fi + + +workflows: + default: + jobs: + - build-frontend + - build-backend + - test-frontend: + requires: [build-frontend] + - test-backend: + requires: [build-backend] + - scan-backend: + requires: [build-backend] + - scan-frontend: + requires: [build-frontend] + - deploy-infrastructure: + requires: [test-frontend, test-backend, scan-frontend, scan-backend] + filters: + branches: + only: [test-feature-branch] + - configure-infrastructure: + requires: [deploy-infrastructure] + - run-migrations: + requires: [configure-infrastructure] + - deploy-frontend: + requires: [run-migrations] + - deploy-backend: + requires: [run-migrations] + - smoke-test: + requires: [deploy-backend, deploy-frontend] + - cloudfront-update: + requires: [smoke-test] + - cleanup: + requires: [cloudfront-update] \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index de7afff..e28f798 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,93 +1,54 @@ version: 2.1 -orbs: - slack: circleci/slack@4.10.1 - -commands: - - failure_notify: - steps: - - slack/notify: - event: fail - channel: continous-integration-and-deployment - template: basic_fail_1 - - install_or_update_node: - description: Install Node of version 13 - steps: - - run: - name: Install Node of verion 13 - command: | - curl -fsSL https://deb.nodesource.com/setup_lts.x | sudo -E bash - - sudo apt install -y nodejs - - sudo npm install -g n - sudo n 13.8.0 - - install_awscli: - description: Install AWS CLI v2 - steps: - - run: - name: Install AWS CLI v2 - command: | - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" - unzip awscliv2.zip - sudo ./aws/install - - ansible: - description: config ansible - steps: - - run: - name: config ansible - command: | - sudo apt update - sudo apt install -y python3 python3-pip - python3 -m pip install --user ansible +orbs: + slack: circleci/slack@4.1 +parameters: + workflow_id: + type: string + default: '${CIRCLE_WORKFLOW_ID:0:7}' - remove-environ: +commands: + destroy-environment: description: Destroy back-end and front-end cloudformation stacks given a workflow ID. 
parameters: - Workflow_ID: + workflow_id: type: string - default: ${CIRCLE_WORKFLOW_ID:0:7} + # default: ${CIRCLE_WORKFLOW_ID} steps: - run: name: Destroy environments when: on_fail command: | - aws cloudformation delete-stack --stack-name udapeople-backend-<< parameters.Workflow_ID >> - aws s3 rm s3://udapeople-<> --recursive - aws cloudformation delete-stack --stack-name udapeople-frontend-<< parameters.Workflow_ID >> - - remove-migrate: - description: Revert the last migration + aws cloudformation delete-stack \ + --stack-name udapeople-backend-<> + aws s3 rm s3://udapeople-<> --recursive + aws cloudformation delete-stack \ + --stack-name udapeople-frontend-<> + + revert-migrations: + description: Revert the last migration if successfully run in the current workflow. parameters: - Workflow_ID: + workflow_id: type: string - default: ${CIRCLE_WORKFLOW_ID:0:7} steps: - run: name: Revert migrations when: on_fail command: | - SUCCESS=$(curl --insecure https://kvdb.io/RKjy7ZzcsUQDe8Jufnew78/migration_<< parameters.Workflow_ID >>) - # Logic for reverting the database state - if (( $SUCCESS == 1 )); - then - cd ~/project/backend - npm install - npm run migration:revert - fi - + cd ~/project/backend + npm install + npm run build + npm run migrations:revert + jobs: build-frontend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [frontend-deps] + keys: [frontend-build] - run: name: Build front-end command: | @@ -96,17 +57,14 @@ jobs: npm run build - save_cache: paths: [frontend/node_modules] - key: frontend-deps - - failure_notify - - + key: frontend-build build-backend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [backend-deps] + keys: [backend-build] - run: name: Back-end build command: | @@ -115,128 +73,119 @@ jobs: npm run build - save_cache: paths: [backend/node_modules] - key: backend-deps - - failure_notify - + key: backend-build test-frontend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [frontend-deps] + keys: [frontend-build] - run: - name: Front-end Unit Test + name: Run frontend test command: | cd frontend npm install - npm test - - failure_notify - + npm run test test-backend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [backend-deps] + keys: [backend-build] - run: - name: Back-end Unit Test + name: Run backend test command: | cd backend npm install - npm test - - failure_notify - + npm run test scan-frontend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [frontend-deps] + keys: [frontend-build] - run: - name: Front-end Scan + name: Analyse frontend command: | cd frontend npm install - npm audit fix --force --audit-level=critical + npm audit fix --audit-level=critical --force + npm audit fix --audit-level=critical --force npm audit --audit-level=critical - - scan-backend: docker: - - image: cimg/node:13.8.0 + - image: circleci/node:13.8.0 steps: - checkout - restore_cache: - keys: [backend-deps] + keys: [backend-build] - run: - name: Back-end Scan + name: Analyse backend command: | - cd backend + cd frontend npm install - npm audit fix --force --audit-level=critical - npm audit fix --force --audit-level=critical + npm audit fix --audit-level=critical --force + npm audit fix --audit-level=critical --force npm audit --audit-level=critical - - - + - slack/notify: + 
event: fail + template: basic_fail_1 deploy-infrastructure: docker: - - image: cimg/base:stable + - image: amazon/aws-cli steps: - checkout - - install_awscli + - run: yum install -y tar gzip - run: name: Ensure back-end infrastructure exists command: | aws cloudformation deploy \ --template-file .circleci/files/backend.yml \ - --tags project=udapeople \ - --stack-name "udapeople-backend-${CIRCLE_WORKFLOW_ID:0:7}" \ - --parameter-overrides ID="${CIRCLE_WORKFLOW_ID:0:7}" - + --tags project=your-tag \ + --stack-name udapeople-backend-${CIRCLE_WORKFLOW_ID:0:7} \ + --parameter-overrides ID=${CIRCLE_WORKFLOW_ID:0:7} - run: name: Ensure front-end infrastructure exist command: | aws cloudformation deploy \ --template-file .circleci/files/frontend.yml \ - --tags project=udapeople \ - --stack-name "udapeople-frontend-${CIRCLE_WORKFLOW_ID:0:7}" \ - --parameter-overrides ID="${CIRCLE_WORKFLOW_ID:0:7}" - + --tags project=your-tag \ + --stack-name udapeople-frontend-${CIRCLE_WORKFLOW_ID:0:7} \ + --parameter-overrides ID=${CIRCLE_WORKFLOW_ID:0:7} - run: name: Add back-end ip to ansible inventory command: | - BACKEND_PUBLIC_IP=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=backend-${CIRCLE_WORKFLOW_ID:0:7}" \ - --query 'Reservations[*].Instances[*].PublicIpAddress' \ - --output text) - echo $BACKEND_PUBLIC_IP >> .circleci/ansible/inventory.txt - cat .circleci/ansible/inventory.txt - + echo $(aws ec2 describe-instances \ + --query 'Reservations[*].Instances[*].PublicIpAddress' \ + --filters Name=tag:aws:cloudformation:stack-name,Values=udapeople-backend-${CIRCLE_WORKFLOW_ID:0:7} \ + --output text) >> ~/project/.circleci/ansible/inventory.txt - persist_to_workspace: root: ~/ paths: - project/.circleci/ansible/inventory.txt - - - remove-environ - - + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} configure-infrastructure: docker: - - image: cimg/base:stable + - image: python:3.7-alpine3.16 steps: - checkout - - install_awscli - - ansible - add_ssh_keys: - fingerprints: ["75:fe:fa:32:56:28:ea:9c:3f:c6:fb:3e:33:44:32:9a"] - + fingerprints: + - '75:fe:fa:32:56:28:ea:9c:3f:c6:fb:3e:33:44:32:9a' + - run: + name: Install dependencies + command: | + apk add --update --no-cache tar gzip ansible aws-cli + - attach_workspace: + at: ~/ - run: - name: Configure Server + name: Configure server command: | + # add environment variables to server echo ENVIRONMENT=production > backend/.env echo TYPEORM_CONNECTION=$TYPEORM_CONNECTION >> backend/.env echo TYPEORM_DATABASE=$TYPEORM_DATABASE >> backend/.env @@ -249,219 +198,214 @@ jobs: echo TYPEORM_USERNAME=$TYPEORM_USERNAME >> backend/.env cat backend/.env cd .circleci/ansible - cat inventory.txt ansible-playbook -i inventory.txt configure-server.yml - persist_to_workspace: root: ~/ paths: - project/backend - - remove-environ - + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} run-migrations: docker: - - image: cimg/node:16.10.0 + - image: circleci/node:13.8.0 # safest to use to avoid migration errors steps: - checkout - - install_awscli - - install_or_update_node + - run: + name: Install dependencies + command: | + sudo apt install -y tar gzip curl + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install - restore_cache: - keys: [backend-deps] - + keys: [backend-build] + - attach_workspace: + at: ~/ - run: name: Run migrations command: | cd backend - npm install - # npm audit fix --force - npm run migrations > migrations_dump.txt - cat 
migrations_dump.txt + npm run migrations >> migrations.txt - run: - name: Send migration status to kvdb.io + name: Send migration results to kvdb command: | - if grep -q "has been executed successfully." ~/project/backend/migrations_dump.txt + if grep -q "success" ~/project/backend/migrations.txt then - curl https://kvdb.io/RKjy7ZzcsUQDe8Jufnew78/migration_${CIRCLE_WORKFLOW_ID:0:7} -d '1' - - echo "No migrations are pending" + curl --insecure https://kvdb.io/RKjy7ZzcsUQDe8Jufnew78/migration_${CIRCLE_WORKFLOW_ID:0:7} -d '1' fi - - remove-environ - - remove-migrate - + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} deploy-frontend: docker: - - image: cimg/base:stable + - image: python:3.7-alpine3.16 steps: - checkout - - install_awscli - - install_or_update_node - - restore_cache: - keys: [frontend-deps] - run: name: Install dependencies command: | - cd frontend - npm install + apk add --update --no-cache tar gzip nodejs npm aws-cli + - restore_cache: + keys: [frontend-build] - run: name: Get backend url command: | - BACKEND_PUBLIC_IP=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=backend-${CIRCLE_WORKFLOW_ID:0:7}" \ + export BACKEND_IP=$(aws ec2 describe-instances \ --query 'Reservations[*].Instances[*].PublicIpAddress' \ + --filters Name=tag:aws:cloudformation:stack-name,Values=udapeople-backend-${CIRCLE_WORKFLOW_ID:0:7} \ --output text) - - echo "API_URL=http://${BACKEND_PUBLIC_IP}:3030" >> frontend/.env + export API_URL="http://${BACKEND_IP}:3030" + echo "API_URL = ${API_URL}" + echo "API_URL=http://${BACKEND_IP}:3030" >> frontend/.env cat frontend/.env - run: name: Deploy frontend objects command: | cd frontend + npm install npm run build + tar -czvf artifact-"${CIRCLE_WORKFLOW_ID:0:7}".tar.gz dist aws s3 cp dist s3://udapeople-${CIRCLE_WORKFLOW_ID:0:7} --recursive - - - remove-environ - - remove-migrate - - + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} + - revert-migrations: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} deploy-backend: docker: - - image: cimg/base:stable + - image: python:3.7-alpine3.16 steps: - checkout - - install_awscli - - ansible - - install_or_update_node - add_ssh_keys: - fingerprints: ["75:fe:fa:32:56:28:ea:9c:3f:c6:fb:3e:33:44:32:9a"] - + fingerprints: + - '75:fe:fa:32:56:28:ea:9c:3f:c6:fb:3e:33:44:32:9a' + - run: + name: Install dependencies + command: | + apk add --update --no-cache tar gzip nodejs npm aws-cli ansible - restore_cache: - keys: [backend-deps] + keys: [backend-build] - attach_workspace: at: ~/ - run: - name: Install dependencies + name: Deploy backend command: | cd backend npm install - - run: - name: Package Backend - command: | - cd backend npm run build - tar -czf artifact.tar.gz dist/* package* - cd .. - cp backend/artifact.tar.gz .circleci/ansible/roles/deploy/files - - run: - name: Deploy backend - command: | - export TYPEORM_MIGRATIONS_DIR=./migrations - export TYPEORM_ENTITIES=./modules/domain/**/*.entity{.ts,.js} - export TYPEORM_MIGRATIONS=./migrations/*.ts - + tar -C backend -czvf artifact.tar.gz . 
+ mkdir -p ~/project/.circleci/ansible/roles/deploy/files/ + mv artifact.tar.gz .circleci/ansible/roles/deploy/files/artifact.tar.gz cd .circleci/ansible - cat inventory.txt + echo "Contents of the inventory.txt file is ------$(cat inventory.txt)" ansible-playbook -i inventory.txt deploy-backend.yml - - - remove-environ - - remove-migrate - + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} + - revert-migrations: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} smoke-test: docker: - - image: cimg/base:stable + - image: python:3.7-alpine3.16 steps: - checkout - - install_awscli - - install_or_update_node - run: - name: Backend smoke test. + name: Install dependencies command: | - BACKEND_PUBLIC_IP=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=backend-${CIRCLE_WORKFLOW_ID:0:7}" \ - --query 'Reservations[*].Instances[*].PublicIpAddress' \ - --output text) - - export API_URL=http://${BACKEND_PUBLIC_IP}:3030 - if curl -s $API_URL/api/status | grep "ok" - then - exit 0 - else - exit 1 - fi + apk add --update --no-cache curl aws-cli nodejs npm + # - attach_workspace: + # at: ~/ + - run: + name: Backend smoke test + command: | + # export BACKEND_IP=$(tail ~/project/.circleci/ansible/inventory.txt) + export BACKEND_IP=$(aws ec2 describe-instances \ + --query 'Reservations[*].Instances[*].PublicIpAddress' \ + --filters Name=tag:aws:cloudformation:stack-name,Values=udapeople-backend-${CIRCLE_WORKFLOW_ID:0:7} \ + --output text) + export API_URL="http://${BACKEND_IP}:3030" + echo $API_URL + if curl --connect-timeout 5 "${API_URL}/api/status" | grep "ok"; then return 0; else return 0; fi - run: name: Frontend smoke test. command: | - FRONTEND_WEBSITE=http://udapeople-${CIRCLE_WORKFLOW_ID:0:7}.s3-website.${AWS_DEFAULT_REGION}.amazonaws.com - if curl -s $FRONTEND_WEBSITE | grep "Welcome" - then - exit 0 - else - exit 1 - fi - - remove-environ - - remove-migrate - - + export URL="http://udapeople-${CIRCLE_WORKFLOW_ID:0:7}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com/#/employees" + echo $URL + if curl ${URL} | grep "Welcome"; then return 0; else return 1; fi + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} + - revert-migrations: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} cloudfront-update: docker: - - image: cimg/base:stable + - image: circleci/node:13.8.0 steps: - checkout - - install_awscli - - install_or_update_node - run: - name: Save Old Workflow ID to kvdb.io + name: Install dependencies command: | - export OLD_WORKFLOW_ID=$(aws cloudformation \ - list-exports --query "Exports[?Name==\`WorkflowID\`].Value" \ - --no-paginate --output text) - echo "Old Wokflow ID: $OLD_WORKFLOW_ID" - curl https://kvdb.io/RKjy7ZzcsUQDe8Jufnew78/old_workflow_id -d "${OLD_WORKFLOW_ID}" + sudo apt install -y tar gzip curl + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install - run: name: Update cloudfront distribution command: | + export OldWorkflowID=$(aws cloudformation list-exports \ + --query "Exports[?Name==\`WorkflowID\`].Value" \ + --no-paginate --output text) aws cloudformation deploy \ - --template-file .circleci/files/cloudfront.yml \ - --parameter-overrides WorkflowID="${CIRCLE_WORKFLOW_ID:0:7}" \ - --stack-name udapeople-cloudfront - - remove-environ - - remove-migrate - - + --template-file .circleci/files/cloudfront.yml \ + --stack-name InitialStack \ + --parameter-overrides WorkflowID=${CIRCLE_WORKFLOW_ID:0:7} \ + --tags project=udapeople + - destroy-environment: + workflow_id: 
${CIRCLE_WORKFLOW_ID:0:7} + - revert-migrations: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} cleanup: docker: - - image: cimg/base:stable + - image: amazon/aws-cli steps: - checkout - - install_awscli - - install_or_update_node - run: - name: Remove old stacks and files + name: Get old stack workflow id and remove stacks command: | - export STACKS=($(aws cloudformation list-stacks \ - --query "StackSummaries[*].StackName" \ - --stack-status-filter CREATE_COMPLETE --no-paginate --output text)) - echo Stack names: "${STACKS[@]}" - - export OldWorkflowID=$(curl --insecure https://kvdb.io/RKjy7ZzcsUQDe8Jufnew78/old_workflow_id) - echo Old Workflow ID: $OldWorkflowID - + echo <> + export OldWorkflowID=<> + export STACKS=$(aws cloudformation list-stacks \ + --query "StackSummaries[*].StackName" \ + --stack-status-filter CREATE_COMPLETE --no-paginate --output text) if [[ "${STACKS[@]}" =~ "${OldWorkflowID}" ]] then + echo "----------cleaning up stacks------------" aws s3 rm "s3://udapeople-${OldWorkflowID}" --recursive aws cloudformation delete-stack --stack-name "udapeople-backend-${OldWorkflowID}" aws cloudformation delete-stack --stack-name "udapeople-frontend-${OldWorkflowID}" fi + add-prometheus-node-exporter: + docker: + - image: python:3.7-alpine3.16 + steps: + - checkout + - add_ssh_keys: + fingerprints: + - '75:fe:fa:32:56:28:ea:9c:3f:c6:fb:3e:33:44:32:9a' + - run: + name: Install dependencies + command: | + apk add --update aws-cli tar gzip ansible nodejs npm + - attach_workspace: + at: ~/ + - run: + name: Setup Prometheus Node Exporter + command: | + cd .circleci/ansible + cat inventory.txt + ansible-playbook -i inventory.txt node-exporter.yml + - destroy-environment: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} + - revert-migrations: + workflow_id: ${CIRCLE_WORKFLOW_ID:0:7} - - # notify_on_success: - # docker: - # - image: cimg/base:stable - # steps: - # - slack/notify: - # channel: cicd_pipeline - # event: pass - # template: success_tagged_deploy_1 - workflows: default: jobs: @@ -475,14 +419,15 @@ workflows: requires: [build-backend] - scan-frontend: requires: [build-frontend] - - deploy-infrastructure: requires: [test-frontend, test-backend, scan-frontend, scan-backend] filters: branches: - only: [circleci-project-setup] - - configure-infrastructure: + only: [dev-branch] + - add-prometheus-node-exporter: requires: [deploy-infrastructure] + - configure-infrastructure: + requires: [add-prometheus-node-exporter] - run-migrations: requires: [configure-infrastructure] - deploy-frontend: @@ -491,7 +436,7 @@ workflows: requires: [run-migrations] - smoke-test: requires: [deploy-backend, deploy-frontend] - # - cloudfront-update: - # requires: [smoke-test] - # - cleanup: - # requires: [cloudfront-update] \ No newline at end of file + - cloudfront-update: + requires: [smoke-test] + - cleanup: + requires: [cloudfront-update, add-prometheus-node-exporter] diff --git a/.circleci/files/backend.yml b/.circleci/files/backend.yml index bee3237..14362c0 100644 --- a/.circleci/files/backend.yml +++ b/.circleci/files/backend.yml @@ -11,7 +11,7 @@ Resources: Type: AWS::EC2::SecurityGroup Properties: GroupName: !Sub UdaPeople-${ID} - GroupDescription: Allow port 22,9100 and port 3030. + GroupDescription: Allow port 22 and port 3030. 
SecurityGroupIngress: - IpProtocol: tcp FromPort: 22 @@ -25,10 +25,6 @@ Resources: FromPort: 9100 ToPort: 9100 CidrIp: 0.0.0.0/0 - # - IpProtocol: tcp - # FromPort: 5432 - # ToPort: 5432 - # CidrIp: 0.0.0.0/0 EC2Instance: Type: AWS::EC2::Instance @@ -39,17 +35,7 @@ Resources: KeyName: udacity # If you use another key pair name, you should change this value to match. # If this ami id is not available for you, you can find another (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) # Ubuntu 18 or higher works fine - ImageId: ami-0729e439b6769d6ab # ubuntu 18 us-east-1 #ami-0d70546e43a941d70 #ubuntu 22 us-west-2 + ImageId: ami-0ee8244746ec5d6d4 Tags: - Key: Name Value: !Sub backend-${ID} - - Key: Project - Value: Udapeople - - InstanceEIP: - Type: AWS::EC2::EIP - Properties: - Domain: vpc - InstanceId: !Ref EC2Instance - - \ No newline at end of file diff --git a/.circleci/files/cloudfront.yml b/.circleci/files/cloudfront.yml index c51ef58..0721fa5 100644 --- a/.circleci/files/cloudfront.yml +++ b/.circleci/files/cloudfront.yml @@ -1,16 +1,18 @@ Description: > - Cloudfront distribution for UdaPeople. + Cloudfront distribution for UdaPeople. Create with + ws cloudformation deploy \ + --template-file cloudfront.yml \ + --stack-name InitialStack\ + --parameter-overrides WorkflowID=kk1j287dhjppmz437 Parameters: WorkflowID: Description: Unique identifier. Type: String - Resources: - CloudFrontOriginAccessIdentity: - Type: "AWS::CloudFront::CloudFrontOriginAccessIdentity" + Type: 'AWS::CloudFront::CloudFrontOriginAccessIdentity' Properties: CloudFrontOriginAccessIdentityConfig: Comment: Origin Access Identity for Serverless Static Website @@ -20,10 +22,10 @@ Resources: Properties: DistributionConfig: Origins: - - DomainName: !Sub "udapeople-${WorkflowID}.s3.amazonaws.com" + - DomainName: !Sub 'udapeople-${WorkflowID}.s3.amazonaws.com' Id: webpage S3OriginConfig: - OriginAccessIdentity: !Sub "origin-access-identity/cloudfront/${CloudFrontOriginAccessIdentity}" + OriginAccessIdentity: !Sub 'origin-access-identity/cloudfront/${CloudFrontOriginAccessIdentity}' Enabled: True DefaultRootObject: index.html DefaultCacheBehavior: diff --git a/.circleci/files/prometheusServer.yml b/.circleci/files/prometheusServer.yml new file mode 100644 index 0000000..2f914e0 --- /dev/null +++ b/.circleci/files/prometheusServer.yml @@ -0,0 +1,345 @@ +Description: | + This template deploys prometheus in the VPC vpc-0492b2b84bd1a3194 (us-west-1) + To execute please run this: + aws cloudformation deploy --stack-name prometheus --template-file ${ThisFile.yml} --capabilities CAPABILITY_IAM \ + --parameter-overrides AuthEmail=${YourGmailAddress} EmailPassword=${YourGmailPassword} FromEmail=${YourEmail} ToEmail=${AnyEmailDestination} \ + VPC=${YourVPC} AZ=${YourAZ} Subnet=${YourSubnet} +Parameters: + VPC: + Description: Deploy the instance in this VPC + Type: String + Default: vpc-0492b2b84bd1a3194 + Subnet: + Description: This subnet must already exist + Type: String + Default: subnet-06241d056512881e6 + AZ: + Description: Choose an AZ that corresponds to the Subnet in the !Subnet section + Type: String + Default: us-west-2a + KeyPair: + Description: | + Set a keypair you will use to connect to the instance + You can generate a keypair from CLI by running + `aws ec2 create-key-pair --key-name udacity >> udacity.pem` + where udacity is the name of the KeyPair + Read more here: https://docs.aws.amazon.com/cli/latest/reference/ec2/create-key-pair.html#examples + Type: String + Default: udacity + Image: + 
Description: | + The ID of the AMI (e.g. ami-0ee8244746ec5d6d4 for Ubuntu 20 deployed in us-west-2 ) + ami-0cfa91bdbc3be780c for Ubuntu 18 in us-west-2 + Type: String + Default: ami-0cfa91bdbc3be780c # ami-0ee8244746ec5d6d4 + LocalIPAddress: + Description: This is your local IP address, it will be used to configure security group ingress + Type: String + Default: 0.0.0.0/0 + AuthEmail: + Description: This is the email (Gmail only!) you will use to authenticate the account + Type: String + EmailPassword: + Description: | + This is the app password for your gmail account. + Get one here https://support.google.com/accounts/answer/185833?hl=en + Type: String + FromEmail: + Description: This is the email to send alerts from (usually same as AuthEmail) + Type: String + ToEmail: + Description: The recipient email for alerts + Type: String + +Resources: + # Security groups + myWebServerSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Security Group for my Web Server Instance + VpcId: !Ref VPC # vpc-0492b2b84bd1a3194 + SecurityGroupIngress: + - CidrIp: + Ref: LocalIPAddress + FromPort: 80 + IpProtocol: tcp + ToPort: 80 + Description: Allows inbound comm on tcp port 80 + - CidrIp: + Ref: LocalIPAddress + FromPort: 9090 + IpProtocol: tcp + ToPort: 9090 + Description: Inbound for Prometheus + - CidrIp: + Ref: LocalIPAddress + FromPort: 8000 + IpProtocol: tcp + ToPort: 8000 + - CidrIp: + Ref: LocalIPAddress + FromPort: 9100 + IpProtocol: tcp + ToPort: 9100 + Description: Inbound for Prometheus Node Exporter + - CidrIp: + Ref: LocalIPAddress + FromPort: 9093 + IpProtocol: tcp + ToPort: 9093 + Description: Inbound for Alert Manager + - CidrIp: + Ref: LocalIPAddress + FromPort: 22 + IpProtocol: tcp + ToPort: 22 + Description: Allows inbound comm on SSH port + SecurityGroupEgress: + - CidrIp: 0.0.0.0/0 + FromPort: -1 + IpProtocol: -1 + ToPort: -1 + Description: Allows outward communication on all ports + # INSTANCES + prometheusInstance: + Type: AWS::EC2::Instance + Properties: + ImageId: !Ref Image + InstanceType: t2.micro + AvailabilityZone: !Ref AZ # !Select [0, !GetAZs ] # You can manually set an availability zone + KeyName: !Ref KeyPair # udacity # Created with `aws ec2 create-key-pair --key-name udacity >> udacity.pem` + NetworkInterfaces: + - DeviceIndex: 0 + AssociatePublicIpAddress: true + SubnetId: !Ref Subnet #subnet-0598050965729f2e9 + GroupSet: + - !Ref myWebServerSecurityGroup + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + VolumeType: gp2 + VolumeSize: '20' + DeleteOnTermination: true + Encrypted: false + UserData: + Fn::Base64: !Sub | + #!/bin/bash + sudo apt update -y + sudo apt upgrade -y + sudo apt install nginx -y + sudo systemctl start nginx + sudo systemctl enable nginx + + sudo useradd --no-create-home --shell /bin/false prome + sudo useradd --no-create-home --shell /bin/false node_exporter + # Install Prometheus + sudo mkdir /etc/prometheus + sudo mkdir /var/lib/prometheus + # Download Prometheus and copy to /usr/local/bin + wget https://github.com/prometheus/prometheus/releases/download/v2.0.0/prometheus-2.0.0.linux-amd64.tar.gz + tar xvf prometheus-2.0.0.linux-amd64.tar.gz + sudo cp prometheus-2.0.0.linux-amd64/prometheus /usr/local/bin/ + sudo cp prometheus-2.0.0.linux-amd64/promtool /usr/local/bin/ + sudo chown prome:prome /usr/local/bin/prometheus + sudo chown prome:prome /usr/local/bin/promtool + sudo cp -r prometheus-2.0.0.linux-amd64/consoles /etc/prometheus + sudo cp -r prometheus-2.0.0.linux-amd64/console_libraries /etc/prometheus 
+ sudo chown -R prome:prome /etc/prometheus/consoles + sudo chown -R prome:prome /etc/prometheus/console_libraries + sudo chown -R prome:prome /var/lib/prometheus + rm -rf prometheus-2.0.0.linux-amd64.tar.gz prometheus-2.0.0.linux-amd64 + # Install node exporter + wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz + tar xzf node_exporter-1.0.1.linux-amd64.tar.gz + sudo cp node_exporter-1.0.1.linux-amd64/node_exporter /usr/local/bin/node_exporter + sudo chown -R node_exporter:node_exporter /usr/local/bin/node_exporter + rm -rf node_exporter-1.0.1.linux-amd64.tar.gz node_exporter-1.0.1.linux-amd64 + # Install Alertmanager + wget https://github.com/prometheus/alertmanager/releases/download/v0.21.0/alertmanager-0.21.0.linux-amd64.tar.gz + tar xvfz alertmanager-0.21.0.linux-amd64.tar.gz + sudo cp alertmanager-0.21.0.linux-amd64/alertmanager /usr/local/bin/ + sudo cp alertmanager-0.21.0.linux-amd64/amtool /usr/local/bin/ + sudo mkdir /var/lib/alertmanager + + rm -rf alertmanager-0.21.0.linux-amd64.tar.gz alertmanager-0.21.0.linux-amd64 + + # Configure node exporter service + sudo echo "[Unit]" >> /etc/systemd/system/node-exporter.service + sudo echo "Description=Prometheus Node Exporter Service" >> /etc/systemd/system/node-exporter.service + sudo echo "After=network.target" >> /etc/systemd/system/node-exporter.service + sudo echo "" >> /etc/systemd/system/node-exporter.service + sudo echo "[Service]" >> /etc/systemd/system/node-exporter.service + sudo echo "User=node_exporter" >> /etc/systemd/system/node-exporter.service + sudo echo "Group=node_exporter" >> /etc/systemd/system/node-exporter.service + sudo echo "Type=simple" >> /etc/systemd/system/node-exporter.service + sudo echo "ExecStart=/usr/local/bin/node_exporter" >> /etc/systemd/system/node-exporter.service + sudo echo "" >> /etc/systemd/system/node-exporter.service + sudo echo "[Install]" >> /etc/systemd/system/node-exporter.service + sudo echo "WantedBy=multi-user.target" >> /etc/systemd/system/node-exporter.service + # Configure prometheus service + sudo echo "[Unit]" >> /etc/systemd/system/prometheus.service + sudo echo "Description=Prometheus" >> /etc/systemd/system/prometheus.service + sudo echo "Wants=network-online.target" >> /etc/systemd/system/prometheus.service + sudo echo "After=network-online.target" >> /etc/systemd/system/prometheus.service + sudo echo "" >> /etc/systemd/system/prometheus.service + sudo echo "[Service]" >> /etc/systemd/system/prometheus.service + sudo echo "User=prome" >> /etc/systemd/system/prometheus.service + sudo echo "Group=prome" >> /etc/systemd/system/prometheus.service + sudo echo "Type=simple" >> /etc/systemd/system/prometheus.service + sudo echo "ExecStart=/usr/local/bin/prometheus --config.file /etc/prometheus/prometheus.yml --storage.tsdb.path /var/lib/prometheus/ --web.console.templates=/etc/prometheus/consoles --web.console.libraries=/etc/prometheus/console_libraries" >> /etc/systemd/system/prometheus.service + sudo echo "" >> /etc/systemd/system/prometheus.service + sudo echo "[Install]" >> /etc/systemd/system/prometheus.service + sudo echo "WantedBy=multi-user.target" >> /etc/systemd/system/prometheus.service + # Configure Alertmanager service + sudo echo "[Unit]" >> /etc/systemd/system/alertmanager.service + sudo echo "Description=Alert Manager" >> /etc/systemd/system/alertmanager.service + sudo echo "Wants=network-online.target" >> /etc/systemd/system/alertmanager.service + sudo echo "After=network-online.target" >> 
/etc/systemd/system/alertmanager.service + sudo echo "" >> /etc/systemd/system/alertmanager.service + sudo echo "[Service]" >> /etc/systemd/system/alertmanager.service + sudo echo "User=prome" >> /etc/systemd/system/alertmanager.service + sudo echo "Group=prome" >> /etc/systemd/system/alertmanager.service + sudo echo "Type=simple" >> /etc/systemd/system/alertmanager.service + sudo echo "ExecStart=/usr/local/bin/alertmanager --config.file=/etc/prometheus/alertmanager.yml --storage.path=/var/lib/alertmanager" >> /etc/systemd/system/alertmanager.service + sudo echo "" >> /etc/systemd/system/alertmanager.service + sudo echo "Restart=always" >> /etc/systemd/system/alertmanager.service + sudo echo "[Install]" >> /etc/systemd/system/alertmanager.service + sudo echo "WantedBy=multi-user.target" >> /etc/systemd/system/alertmanager.service + + # Create prometheus config + echo "global:" > prometheus.yml + echo " scrape_interval: 15s" >> prometheus.yml + echo " evaluation_interval: 15s" >> prometheus.yml + # Rule files + echo "rule_files:" >> prometheus.yml + echo " - /etc/prometheus/rules.yml" >> prometheus.yml + # Alerts + echo "alerting:" >> prometheus.yml + echo " alertmanagers:" >> prometheus.yml + echo " - static_configs:" >> prometheus.yml + echo " - targets:" >> prometheus.yml + echo " - localhost:9093" >> prometheus.yml + # JOBS + echo "scrape_configs:" >> prometheus.yml + # prometheus job + # echo " - job_name: 'prometheus'" >> prometheus.yml + # echo " scrape_interval: 5s" >> prometheus.yml + # echo " static_configs:" >> prometheus.yml + # echo " - targets: ['localhost:9090']" >> prometheus.yml + # autodiscover EC2 instances job + echo " - job_name: 'node'" >> prometheus.yml + echo " ec2_sd_configs:" >> prometheus.yml + echo " - region: us-west-2" >> prometheus.yml + echo " access_key: ${IAMUserAccessKey}" >> prometheus.yml + echo " secret_key: ${IAMUserAccessKey.SecretAccessKey}" >> prometheus.yml + echo " port: 9100" >> prometheus.yml + # Reposition prometheus.yml + sudo mv prometheus.yml /etc/prometheus/prometheus.yml + + # Create rules + echo "groups:" > /etc/prometheus/rules.yml + echo "- name: Down" >> /etc/prometheus/rules.yml + echo " rules:" >> /etc/prometheus/rules.yml + echo " - alert: InstanceDown" >> /etc/prometheus/rules.yml + echo " expr: up == 0" >> /etc/prometheus/rules.yml + echo " for: 3m" >> /etc/prometheus/rules.yml + echo " labels:" >> /etc/prometheus/rules.yml + echo " severity: 'critical'" >> /etc/prometheus/rules.yml + echo " annotations:" >> /etc/prometheus/rules.yml + echo " summary: 'Instance is down'" >> /etc/prometheus/rules.yml + echo " description: ' of job has been down for more than 3 minutes.'" >> /etc/prometheus/rules.yml + + # Create alertmanager config + echo "route:" > alertmanager.yml + echo " group_by: [Alertname]" >> alertmanager.yml + echo " receiver: email-me" >> alertmanager.yml + echo "" >> alertmanager.yml + echo "receivers:" >> alertmanager.yml + echo " - name: email-me" >> alertmanager.yml + echo " email_configs:" >> alertmanager.yml + echo " - to: ${ToEmail}" >> alertmanager.yml + echo " from: ${FromEmail}" >> alertmanager.yml + echo " smarthost: smtp.gmail.com:587" >> alertmanager.yml + echo " auth_username: ${AuthEmail}" >> alertmanager.yml + echo " auth_identity: ${AuthEmail}" >> alertmanager.yml + echo " auth_password: ${EmailPassword}" >> alertmanager.yml + # Reposition alertmanager.yml + sudo mv alertmanager.yml /etc/prometheus/alertmanager.yml + # csuzceqgjrskelub + # Alertmanager rules + echo "groups:" > rules.yml + echo " - name: 
Down" >> rules.yml + echo " rules:" >> rules.yml + echo " - alert: InstanceDown" >> rules.yml + echo " expr: up == 0" >> rules.yml + echo " labels:" >> rules.yml + echo " severity:" >> rules.yml + echo " annotations:" >> rules.yml + echo " summary: Instance is down" >> rules.yml + echo " descriptions: of job has ben down for more than 3 minutes" >> rules.yml + # Reposition rules.yml + sudo mv rules.yml /etc/prometheus/rules.yml + + # Enable and (Re)start services + sudo systemctl daemon-reload + sudo systemctl start prometheus + sudo systemctl enable prometheus + sudo systemctl enable node-exporter + sudo systemctl start node-exporter + sudo systemctl enable alertmanager + sudo systemctl start alertmanager + Tags: + - Key: Name + Value: Prometheus Server + # Roles + IAMUser: + Type: AWS::IAM::User + Properties: + Path: '/' + Policies: + - PolicyName: AmazonEC2ReadOnlyAccess + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - ec2:Describe* + - elasticloadbalancing:Describe* + - cloudwatch:List* + - cloudwatch:Get* + - cloudwatch:Describe* + - autoscaling:Describe* + Resource: '*' + IAMUserAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref IAMUser + +Outputs: + Instance: + Description: EC2 instance + Value: !Ref prometheusInstance + PublicIP: + Description: Public IP Address + Value: !GetAtt prometheusInstance.PublicIp + PrometheusEndpoint: + Description: Prometheus Endpoint + Value: + !Join ['', ['http://', !GetAtt prometheusInstance.PublicDnsName, ':9090']] + PrometheusNodeExporterEndpoint: + Description: Prometheus Node Exporter Endpoint + Value: + !Join ['', ['http://', !GetAtt prometheusInstance.PublicDnsName, ':9100']] + PrometheusAlertManagerEndpoint: + Description: Prometheus Alert Manager Endpoint + Value: + !Join ['', ['http://', !GetAtt prometheusInstance.PublicDnsName, ':9093']] + AccessKeyID: + Description: Access Key for IAMUser + Value: !Ref IAMUserAccessKey + Secret: + Description: Secret Key for IAMUser + Value: !GetAtt IAMUserAccessKey.SecretAccessKey + EmailPwd: + Description: Email password + Value: !Ref EmailPassword diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..041673d --- /dev/null +++ b/.gitignore @@ -0,0 +1,70 @@ +*~ +*.sw[mnpcod] +*.log +*.tmp +*.tmp.* +log.txt +*.sublime-project +*.sublime-workspace +.vscode/ +npm-debug.log* + +.idea/ +.ionic/ +.sourcemaps/ +.sass-cache/ +.tmp/ +.versions/ +coverage/ +dist/ +node_modules/ +tmp/ +temp/ +platforms/ +plugins/ +plugins/android.json +plugins/ios.json +$RECYCLE.BIN/ +postgres_dev/ +logfile + +.DS_Store +Thumbs.db +UserInterfaceState.xcuserstate +node_modules +venv/ +*~ +*.sw[mnpcod] +*.log +*.tmp +*.tmp.* +log.txt +*.sublime-project +*.sublime-workspace +.vscode/ +npm-debug.log* + +.idea/ +.ionic/ +.sourcemaps/ +.sass-cache/ +.tmp/ +.versions/ +coverage/ +www/ +node_modules/ +tmp/ +temp/ +platforms/ +plugins/ +plugins/android.json +plugins/ios.json +$RECYCLE.BIN/ + +.DS_Store +Thumbs.db +UserInterfaceState.xcuserstate +node_modules + +.env +dist \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..2a6bcb2 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @udacity/active-public-content \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..d6e297c --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,15 @@ + +Copyright © 2012 - 2020, Udacity, Inc. 
+ +Udacity hereby grants you a license in and to the Educational Content, including but not limited to homework assignments, programming assignments, code samples, and other educational materials and tools (as further described in the Udacity Terms of Use), subject to, as modified herein, the terms and conditions of the Creative Commons Attribution-NonCommercial- NoDerivs 3.0 License located at http://creativecommons.org/licenses/by-nc-nd/4.0 and successor locations for such license (the "CC License") provided that, in each case, the Educational Content is specifically marked as being subject to the CC License. +Udacity expressly defines the following as falling outside the definition of "non-commercial": +(a) the sale or rental of (i) any part of the Educational Content, (ii) any derivative works based at least in part on the Educational Content, or (iii) any collective work that includes any part of the Educational Content; +(b) the sale of access or a link to any part of the Educational Content without first obtaining informed consent from the buyer (that the buyer is aware that the Educational Content, or such part thereof, is available at the Website free of charge); +(c) providing training, support, or editorial services that use or reference the Educational Content in exchange for a fee; +(d) the sale of advertisements, sponsorships, or promotions placed on the Educational Content, or any part thereof, or the sale of advertisements, sponsorships, or promotions on any website or blog containing any part of the Educational Material, including without limitation any "pop-up advertisements"; +(e) the use of Educational Content by a college, university, school, or other educational institution for instruction where tuition is charged; and +(f) the use of Educational Content by a for-profit corporation or non-profit entity for internal professional development or training. + + + +THE SERVICES AND ONLINE COURSES (INCLUDING ANY CONTENT) ARE PROVIDED "AS IS" AND "AS AVAILABLE" WITH NO REPRESENTATIONS OR WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. YOU ASSUME TOTAL RESPONSIBILITY AND THE ENTIRE RISK FOR YOUR USE OF THE SERVICES, ONLINE COURSES, AND CONTENT. WITHOUT LIMITING THE FOREGOING, WE DO NOT WARRANT THAT (A) THE SERVICES, WEBSITES, CONTENT, OR THE ONLINE COURSES WILL MEET YOUR REQUIREMENTS OR EXPECTATIONS OR ACHIEVE THE INTENDED PURPOSES, (B) THE WEBSITES OR THE ONLINE COURSES WILL NOT EXPERIENCE OUTAGES OR OTHERWISE BE UNINTERRUPTED, TIMELY, SECURE OR ERROR-FREE, (C) THE INFORMATION OR CONTENT OBTAINED THROUGH THE SERVICES, SUCH AS CHAT ROOM SERVICES, WILL BE ACCURATE, COMPLETE, CURRENT, ERROR- FREE, COMPLETELY SECURE OR RELIABLE, OR (D) THAT DEFECTS IN OR ON THE SERVICES OR CONTENT WILL BE CORRECTED. YOU ASSUME ALL RISK OF PERSONAL INJURY, INCLUDING DEATH AND DAMAGE TO PERSONAL PROPERTY, SUSTAINED FROM USE OF SERVICES. diff --git a/README.md b/README.md index 6923776..2ad7d2f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,56 @@ +[![ecovate](https://circleci.com/gh/ecovate/cdond-c3-projectstarter.svg?style=svg)](https://app.circleci.com/pipelines/github/ecovate/cdond-c3-projectstarter) +We are archiving this repository because we do not want learners to push personal development to the current repository. 
If you have any issues or suggestions to make, feel free to: +- Utilize the https://knowledge.udacity.com/ forum to seek help on content-specific issues. +- [Submit a support ticket](https://udacity.zendesk.com/hc/en-us/requests/new) along with the link to your forked repository. +- If you are an enterprise learner, please [Submit a support ticket here](https://udacityenterprise.zendesk.com/hc/en-us/requests/new?ticket_form_id=360000279131) + ## Give your Application Auto-Deploy Superpowers +In this project, you will prove your mastery of the following learning objectives: + +- Explain the fundamentals and benefits of CI/CD to achieve, build, and deploy automation for cloud-based software products. +- Utilize Deployment Strategies to design and build CI/CD pipelines that support Continuous Delivery processes. +- Utilize a configuration management tool to accomplish deployment to cloud-based servers. +- Surface critical server errors for diagnosis using centralized structured logging. + +![Diagram of CI/CD Pipeline we will be building.](udapeople.png) + +### Instructions + +* [Selling CI/CD](instructions/0-selling-cicd.md) +* [Getting Started](instructions/1-getting-started.md) +* [Deploying Working, Trustworthy Software](instructions/2-deploying-trustworthy-code.md) +* [Configuration Management](instructions/3-configuration-management.md) +* [Turn Errors into Sirens](instructions/4-turn-errors-into-sirens.md) + +### Project Submission + +For your submission, please submit the following: + +- A text file named `urls.txt` including: + 1. Public Url to GitHub repository (not private) [URL01] + 1. Public URL for your S3 Bucket (aka, your green candidate front-end) [URL02] + 1. Public URL for your CloudFront distribution (aka, your blue production front-end) [URL03] + 1. Public URLs to deployed application back-end in EC2 [URL04] + 1. Public URL to your Prometheus Server [URL05] +- Your screenshots in JPG or PNG format, named using the screenshot number listed in the instructions. These screenshots should be included in your code repository in the root folder. + 1. Job failed because of compile errors. [SCREENSHOT01] + 1. Job failed because of unit tests. [SCREENSHOT02] + 1. Job that failed because of vulnerable packages. [SCREENSHOT03] + 1. An alert from one of your failed builds. [SCREENSHOT04] + 1. Appropriate job failure for infrastructure creation. [SCREENSHOT05] + 1. Appropriate job failure for the smoke test job. [SCREENSHOT06] + 1. Successful rollback after a failed smoke test. [SCREENSHOT07] + 1. Successful promotion job. [SCREENSHOT08] + 1. Successful cleanup job. [SCREENSHOT09] + 1. Only deploy on pushed to `master` branch. [SCREENSHOT10] + 1. Provide a screenshot of a graph of your EC2 instance including available memory, available disk space, and CPU usage. [SCREENSHOT11] + 1. Provide a screenshot of an alert that was sent by Prometheus. [SCREENSHOT12] + +- Your presentation should be in PDF format named "presentation.pdf" and should be included in your code repository root folder. + +Before you submit your project, please check your work against the project rubric. If you haven’t satisfied each criterion in the rubric, then revise your work so that you have met all the requirements. 
+ ### Built With - [Circle CI](https://www.circleci.com) - Cloud-based CI/CD service @@ -8,3 +59,7 @@ - [CloudFormation](https://aws.amazon.com/cloudformation/) - Infrastructure as code - [Ansible](https://www.ansible.com/) - Configuration management tool - [Prometheus](https://prometheus.io/) - Monitoring tool + +### License + +[License](LICENSE.md) diff --git a/backend/package.json b/backend/package.json index a8cd84f..95ea821 100644 --- a/backend/package.json +++ b/backend/package.json @@ -13,7 +13,7 @@ "prestart:prod": "rimraf dist && tsc", "start:prod": "node dist/main.js", "start:hmr": "node dist/server", - "lint": "tslint -p tsconfig.json -c tslint.json", + "lint": "tslint -p tsconfig.json -c tslint.json --fix", "test": "jest --runInBand", "test:watch": "jest --watch", "test:cov": "jest --coverage", @@ -39,7 +39,7 @@ "@nestjs/jwt": "^6.0.0", "@nestjs/passport": "^6.2.0", "@nestjs/platform-express": "^6.11.11", - "@nestjs/swagger": "^4.6.1", + "@nestjs/swagger": "^3.1.0", "@nestjs/typeorm": "^6.3.4", "@types/jsonwebtoken": "^7.2.8", "@types/winston": "^2.4.4", @@ -79,7 +79,7 @@ "rimraf": "^2.6.2", "standard-version": "^7.0.0", "supertest": "^3.4.2", - "ts-jest": "^26.4.0", + "ts-jest": "^24.3.0", "ts-loader": "^4.4.2", "ts-node": "^7.0.1", "tsconfig-paths": "^3.9.0", @@ -90,11 +90,6 @@ "webpack-node-externals": "^1.7.2" }, "jest": { - "globals": { - "ts-jest": { - "isolatedModules": true - } - }, "moduleFileExtensions": [ "js", "json", diff --git a/backend/production.env b/backend/production.env deleted file mode 100644 index b088537..0000000 --- a/backend/production.env +++ /dev/null @@ -1,13 +0,0 @@ -NODE_ENV=production -VERSION=1 -TYPEORM_CONNECTION=postgres -TYPEORM_MIGRATIONS_DIR=./migrations -TYPEORM_ENTITIES=./modules/domain/**/*.entity.ts -TYPEORM_MIGRATIONS=./migrations/*.ts - -# Things you can change if you wish... -TYPEORM_HOST=udapeople-db.coqne9c9ehci.us-east-1.rds.amazonaws.com -TYPEORM_PORT=5432 -TYPEORM_USERNAME=postgres -TYPEORM_PASSWORD=g05pLAnveKWS7FjBjtOL -TYPEORM_DATABASE=glee \ No newline at end of file diff --git a/backend/src/main.ts b/backend/src/main.ts index 360c846..7eb3e85 100644 --- a/backend/src/main.ts +++ b/backend/src/main.ts @@ -28,7 +28,7 @@ async function bootstrap() { .setVersion('1.0') .addTag('customTag') .setBasePath(apiVersionPrefix) - .addBearerAuth()// here is an intentional compile error. Remove the "x" and the backend should compile. + .addBearerAuth() // here is an intentional compile error. Remove the "x" and the backend should compile. .build(); const document = SwaggerModule.createDocument(app, options); SwaggerModule.setup(`api/${apiVersionPrefix}`, app, document); diff --git a/instructions/0-selling-cicd.md b/instructions/0-selling-cicd.md new file mode 100644 index 0000000..cfed46e --- /dev/null +++ b/instructions/0-selling-cicd.md @@ -0,0 +1,7 @@ +## Section 1 - Explain the Fundamentals and Benefits of CI/CD to Achieve, Build, and Deploy Automation for Cloud-Based Software Products + +You are leading a team to develop the UdaPeople product, a revolutionary concept in Human Resources which promises to help small businesses care better for their most valuable resource: their people. Before implementing CI/CD for the UdaPeople product, you need to get authorization from the people who write the checks. Create a proposal in document or presentation form that “sells” the concept of CI/CD to non-technical decision-makers in the UdaPeople organization.
For this, you will need to step out of your technical world and step into the world of revenue and costs. You will need to translate the benefits of CI/CD from technical language to the values of the business. To appeal to what makes business people tick, you’ll need to focus your attention on benefits that create revenue, protect revenue, control costs or reduce costs. + +The deliverable should be “near-production-quality”, but you should try to time-box your work to about 30 minutes. In other words, it should be good enough to submit to a real boss in a real job. No messy, last-minute submissions. You may use public domain or open source templates and graphics if you’d like. But please make sure the content is your own. Your presentation should be no longer than 5 slides. Your boss likes presentations that are short and sweet! + +Your presentation should be in PDF format named "presentation.pdf" and should be included in your code repository root folder. diff --git a/instructions/1-getting-started.md b/instructions/1-getting-started.md new file mode 100644 index 0000000..b9c95a6 --- /dev/null +++ b/instructions/1-getting-started.md @@ -0,0 +1,128 @@ +## Getting Started + +Instructions for how to get a copy of the project running on your local machine. + +### Dependencies + +* Git SCM +* SSH client like OpenSSH +* NodeJS v10 or higher (if you plan on compiling locally) + +### Starter Code + +Please watch the [video walkthrough of the starter code here](https://www.youtube.com/watch?v=ODLIAe28OJk). + +1. Clone the [starter code](https://github.com/udacity/cdond-c3-projectstarter) to your machine so that you can manipulate the files. +2. Push your code into a repository in your account on GitHub. You might consider making your repository public so that Circle CI will give you more credits to run builds ([more information here](https://circleci.com/open-source/)). + +### Provided Cloud Formation Templates + +For your convenience, we have provided some CloudFormation templates that you can use throughout the deployment phase of your project. You can find those templates in [this folder](https://github.com/udacity/cdond-c3-projectstarter/tree/master/.circleci/files). + +### Intentionally Failing Jobs + +We left a scaffolded `config.yml` for you [here](https://github.com/udacity/cdond-c3-projectstarter/blob/master/.circleci/config.yml) to help you get started with CircleCI's configuration. To call attention to unfinished jobs, we left some "non-zero error codes" (e.g. `exit 1`) for you to remove when you have finished implementing a job. + +### Compiling/Running Locally (Optional) + +**PLEASE NOTE:** It is NOT necessary that you compile and run the project locally. The goal of this project is for you to show mastery in management of CI/CD systems, not React/NodeJS web applications. If you are experienced with React/NodeJS or don't mind an extra challenge, then be our guest! But you can complete this project perfectly well without compiling or running the code locally. + +The instructions and information that follow should help you build, test and deploy the web application either locally or in CI/CD. + +This is a "mono-repository" which means multiple servers or layers exist in the same repository. You'll find the following main folders: + +- `./frontend` +- `./backend` + +#### 1. Install dependencies in both `frontend` and `backend` folders.
+ +From your `cdond-cd-projectstarter` folder, use the commands: +```bash +cd frontend +npm i +``` +From your `cdond-cd-projectstarter` folder, use the commands: +```bash +cd backend +npm i +``` + +#### 2. Create `.env` file for database connection info. + +Add a `.env` file to your `backend` folder with the following contents: + +```bash +NODE_ENV=local +VERSION=1 +TYPEORM_CONNECTION=postgres +TYPEORM_MIGRATIONS_DIR=./src/migrations +TYPEORM_ENTITIES=./src/modules/domain/**/*.entity.ts +TYPEORM_MIGRATIONS=./src/migrations/*.ts + +# Things you can change if you wish... +TYPEORM_HOST=localhost +TYPEORM_PORT=5532 +TYPEORM_USERNAME=postgres +TYPEORM_PASSWORD=password +TYPEORM_DATABASE=glee +``` + +You can use your own Postgres server if you wish or you can use the Docker-Compose template we provided in the `./utils` folder. + +## Running PostgreSQL in Docker-Compose + +For convenience, we have provided a template that you can use to easily run a Postgres database for local testing. To run this template, you'll need to install Docker and Docker-Compose. + +To start the database, you will use the following commands from your `cdond-cd-projectstarter` folder: +```bash +cd util +docker-compose up +``` + +## Compiling the Code + +You can compile the code from your `cdond-cd-projectstarter` folder using the following: +```bash +cd frontend +npm run build +``` + +```bash +cd backend +npm run build +``` + +**WARNING:** There are some errors in both front-end and back-end that will make any attempt to compile FAIL when you first clone the repo. These errors are **intentional**. There are steps in the project that require a build to break in Circle CI. Please don't fix these errors until instructed to do so later on. + +## Testing, Migrating, Running + +As the warning says above, it won't be possible to run most of the code in the project until later on when you are instructed to fix some errors. So, you may not be able to try the following commands right now. We are providing them here as a reference. + +Most of the tasks needed to build, test and deploy the application are simplified by "npm scripts" that are found in the `package.json` for either front-end or back-end. For any of these scripts, you will need to `cd` into the respective folder and then run the script using the command `npm run [script name]`. Here are the most relevant scripts: + +| Name | Purpose | Notes | +| :-- | :-- | :-- | +| migrations | Run migration which checks for any migration scripts that have not yet been applied to the db and runs them. |Make sure you have a Postgres database running and your `.env` file is configured correctly. If you get connection errors from the backend when you start it, then chances are your DB is not running or the `.env` doesn't have the correct DB connection information. | +| migrations:revert | Revert last successfully executed migration. | The same connection configuration is needed here as with the `migrations` script above. | +| test | Run all unit tests. | | +| build | Compiles the code. | Drops the compiled code in the `./dist` folder. | +| start | Starts up the application locally. | Make sure you have a Postgres database running and your `.env` file is configured correctly. 
If you get connection errors from the backend when you start it, then chances are your DB is not running or the `.env` doesn't have the correct DB connection information.| + +### Examples: + +This should compile the code and then list the result in the `./dist` folder: + +```bash +cd frontend +npm run build +cd dist +ls +``` + +... or revert the last migration that ran: + +```bash +cd backend +npm run migrations:revert +``` + diff --git a/instructions/2-deploying-trustworthy-code.md b/instructions/2-deploying-trustworthy-code.md new file mode 100644 index 0000000..efcd78b --- /dev/null +++ b/instructions/2-deploying-trustworthy-code.md @@ -0,0 +1,79 @@ +## Section 2 - Utilize Deployment Strategies to Design and Build CI/CD Pipelines that Support Continuous Delivery Processes + +### Circle CI + +Circle CI is only one of many options for CI/CD tools. It is a “software as a service” and has a [free account](https://circleci.com/signup/?source-button=free) that you can use throughout this project, which is ideal for UdaPeople since it’s a start-up running on a shoestring budget! + +1. [Create an account](https://circleci.com/signup/?source-button=free) with circleci.com if you haven't already. We recommend the free tier for this course. It includes 2500 credits per week which equals around 70 builds. This should be enough as long as you are conservative with your builds. _If you run out of credits, you can create another account and continue working._ +2. Create a new project in Circle CI using your GitHub repo. + - Notice the `.circleci` folder. This is where your jobs will go. +3. Ensure a workflow starts with the jobs in your `.config` file. If you need to take a look at some samples, Circle CI was nice enough to [give us a few](https://circleci.com/docs/2.0/sample-config). + +Please watch the [video walkthrough of setting up CircleCI here](https://www.youtube.com/watch?v=SRBmYjUF-tA). + +### Screenshots and URLs + +Throughout this project, you will be asked to take screenshots or provide URLs to aid in the evaluation process once you're done with the project. It's worth mentioning here since it's much harder to harvest some screen shots once you've passed certain milestones. **It's best if you take screenshots along the way and store them in a folder on your computer until you're ready to turn the project in.** Also, it's good to keep a document or notepad with the list of urls that are requested. + +### To Do + +- Make sure commits/pushes to repo trigger the CI/CD pipeline (this should be automatic after connecting CircleCI to your Github repo). + +#### 1. Build Phase + +The goal of a build phase is to compile or lint the source code to check for syntax errors or unintentional typos in code. It’s your first line of defense against bugs as you attempt to integrate the pieces of your project together. This is especially important to UdaPeople because we don’t want to waste credits or time running other steps if the code can’t even compile. + +- Find the job named `build-frontend` in the `.circleci/config.yml` file. + - Add code to build/compile the front-end. +- Find another job named `build-backend` in the `.circleci/config.yml` file. + - Add code to build/compile the back-end. +- Notice that both jobs have selected a Docker image that is compatible with NodeJS. +``` + - image: circleci/node:13.8.0 +``` +- Throughout this project, you should have separate jobs for the front-end and back-end so that failure alerts are more descriptive. 
+- Jobs should fail if code cannot be compiled (fail for the right reasons), and **a failed build should stop all future jobs.** +- We have provided an easy-to-fix compile error in the code to prove the jobs fail. Provide a screenshot of jobs that failed because of compile errors. **[SCREENSHOT01]** +![Job properly failing because of compile errors.](screenshots/SCREENSHOT01.png) +- Fix the compile error so that the pipeline can continue (see code-comment that guides you to the fix). + +#### 2. Test Phase + +Unit tests are one of the many very important building blocks of a system that enables Continuous Delivery (notice, we didn’t say “the only or most important thing”). UdaPeople believes that tests should come first just like they do in the scientific method. So, if a test fails, it's because the code is no longer trustworthy. Only trustworthy code should get a ticket to continue the ride! + +- Find the jobs named `test-frontend` and `test-backend` in the config file. + - For both jobs, select a Docker image that is compatible with NodeJS. + - Write code to run all the unit tests in both layers. +- Remember, we separate the frontend and backend into separate jobs! +- A unit test job should fail the job and prevent any future jobs from running. +- We have provided one failing test in both front-end and back-end. Provide a screenshot of the failed unit tests in the "Test Failures" tab. **[SCREENSHOT02]** +![Job properly failing because of test failures.](screenshots/SCREENSHOT02.png) +- Fix the unit tests and make the job succeed. + +#### 3. Analyze Phase + +UdaPeople handles some private information like social security numbers, salary amount, etc. It would be a shame if a package with a known vulnerability left a security hole in our application, giving hackers access to that information! That’s why we should include a job that checks for known vulnerabilities every time we check in new code. + +- Find the jobs named `scan-frontend` and `scan-backend` in the config file. + - For both jobs, select a Docker image that is compatible with NodeJS. + - Write code to check for security vulnerabilities in the packages used in the application. + - Use `npm` to “audit” the code to check for known package vulnerabilities. Just `cd` into the directory of front-end and back-end and run the following: +```bash +npm audit --audit-level=critical +``` +- Job should fail if any major vulnerabilities are found (fail for the right reasons). We left you an intentional vulnerability to cause a failure. Provide a screenshot of jobs that failed because of vulnerable packages listed. **[SCREENSHOT03]** +![Job properly failing because of security vulnerabilities.](screenshots/SCREENSHOT03.png) +- Fix the vulnerability using the command below and re-run the job. +```bash +npm audit fix --audit-level=critical --force +``` +- A failed analysis should stop all future jobs. + +#### 4. Alerts + +When a build fails for any reason, the UdaPeople dev team needs to know about it. That way they can jump in and save the day (the day that they almost ruined by checking in bad code… but we digress). We’re going to add an alert so that botched builds raise a nice wavy red flag. + +- Integrate Slack, email or another communication tool to receive alerts when jobs fail. Our examples are using Slack, but you should feel free to use the communication tool to which you are most accustomed. +- Alerts should include a summary of what happened and a link to the job console output for quick troubleshooting. 
+- Provide a screenshot of an alert from one of your failed builds. **[SCREENSHOT04]** +![An alert when the build breaks.](screenshots/SCREENSHOT04.png) diff --git a/instructions/3-configuration-management.md b/instructions/3-configuration-management.md new file mode 100644 index 0000000..378ed4a --- /dev/null +++ b/instructions/3-configuration-management.md @@ -0,0 +1,227 @@ +## Section 3 - Utilize a Configuration Management Tool to Accomplish Deployment to Cloud-Based Servers + +In this section, you will practice creating and configuring infrastructure before deploying code to it. You will accomplish this by preparing your AWS and CircleCI accounts just a bit, then by building Ansible Playbooks for use in your CircleCI configuration. + +### Setup + +#### AWS +1. Create and download a new key pair in AWS for CircleCI to use to work with AWS resources. Name this key pair "udacity" so that it works with your Cloud Formation templates. [This tutorial may help](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#having-ec2-create-your-key-pair) (look for "Option 1: Create a key pair using Amazon EC2"). You'll be using this key pair (pem file) in future steps so keep it in a memorable location. +2. Create IAM user for programmatic access only and copy the id and access keys. [This tutorial may help.](https://serverless-stack.com/chapters/create-an-iam-user.html) You'll need these keys if you want to try any AWS commands from your own command line. You'll also need these credentials to add to CircleCI configuration in the next steps. +3. Add a PostgreSQL database in RDS that has **public accessibility**. Take note of the connection details (hostname, username, password). [This tutorial may help.](https://aws.amazon.com/getting-started/tutorials/create-connect-postgresql-db/) As long as you marked "Public Accessibility" as "yes", you won't need to worry about VPC settings or security groups. + +Please watch the [video walkthrough of preparing AWS here](https://www.youtube.com/watch?v=d1W1HUz8yRw). + +### CloudFront Distribution Primer + +At the very end of the pipeline, you will need to make a switch from the old infrastructure to the new as you learned about with the Blue Green Deployment strategy. We will use CloudFormation and CloudFront to accomplish this. However, for this to work, you must do a few things manually: + +1) Create a random string (e.g. `kk1j287dhjppmz437`) for use in next steps. +2) Create an S3 Bucket with a name that combines "udapeople" and the random string (e.g. "udapeople-kk1j287dhjppmz437"). If S3 complains that the name is already taken, just choose another random string. The random string is to distinguish your bucket from other student buckets. +3) Run our provided [Cloud Formation](https://github.com/udacity/cdond-c3-projectstarter/blob/master/.circleci/files/cloudfront.yml) template locally (for the Workflow ID parameter, use your random string). + +Once that is done, subsequent executions of that template will modify the same CloudFront distribution to make the blue-to-green switch without fail. + +#### Circle CI + +Please watch the [video walkthrough of setting up your secrets here](https://www.youtube.com/watch?v=caFJQ1YwVdU). + +1. Add SSH Key pair from EC2 as shown [here](https://circleci.com/docs/2.0/add-ssh-key/). To get the actual key pair, you'll need to open the pem file in a text editor and copy the contents. Then you can paste them into Circle CI. + +2. 
Add the following environment variables to your Circle CI project by navigating to {project name} > Settings > Environment Variables as shown [here](https://circleci.com/docs/2.0/settings/): + - `AWS_ACCESS_KEY_ID`=(from IAM user with programmatic access) + - `AWS_SECRET_ACCESS_KEY`= (from IAM user with programmatic access) + - `AWS_DEFAULT_REGION`=(your default region in aws) + - `TYPEORM_CONNECTION`=`postgres` + - `TYPEORM_MIGRATIONS_DIR`=`./src/migrations` + - `TYPEORM_ENTITIES`=`./src/modules/domain/**/*.entity.ts` + - `TYPEORM_MIGRATIONS`=`./src/migrations/*.ts` + - `TYPEORM_HOST`={your postgres database hostname in RDS} + - `TYPEORM_PORT`=`5532` (or the port from RDS if it’s different) + - `TYPEORM_USERNAME`={your postgres database username in RDS} + - `TYPEORM_PASSWORD`={your postgres database password in RDS} + - `TYPEORM_DATABASE`={your postgres database name in RDS} + +_NOTE:_ Some AWS-related jobs may take awhile to complete. If a job takes too long, it could cause a timeout. If this is the case, just restart the job and keep your fingers crossed for faster network traffic. If this happens often, you might consider increasing the job timeout [as described here](https://support.circleci.com/hc/en-us/articles/360007188574-Build-has-hit-timeout-limit). + +### To Do + +#### 1. Infrastructure Phase + +Setting up servers and infrastructure is complicated business. There are many, many moving parts and points of failure. The opportunity for failure is massive when all that infrastructure is handled manually by human beings. Let’s face it. We’re pretty horrible at consistency. That’s why UdaPeople adopted the IaC (“Infrastructure as Code”) philosophy after “Developer Dave” got back from the last DevOps conference. We’ll need a job that executes some CloudFormation templates so that the UdaPeople team never has to worry about a missed deployment checklist item. + +In this phase, you will add CircleCI jobs that execute Cloud Formation templates that create infrastructure as well as jobs that execute Ansible Playbooks to configure that newly created infrastructure. + +##### Create/Deploy Infrastructure + +- Find the job named `deploy-infrastructure` in your config file + - Add code to create your infrastructure using [CloudFormation templates](https://github.com/udacity/cdond-c3-projectstarter/tree/master/.circleci/files). Again, provide a screenshot demonstrating an appropriate job failure (failing for the right reasons). **[SCREENSHOT05]** + +![Job properly failing because of an error when creating infrastructure.](screenshots/SCREENSHOT05.png) + + - Select a Docker image that supports the AWS CLI + - Create backend infrastructure by editing the step named `Ensure back-end infrastructure exists`. You'll notice you need to edit the `--tags`, `--stack-name`, and `--parameter-overrides` with your information. Make sure to remove each `#` to uncomment the lines after you've added your information. + - Use the workflow id to mark your CloudFormation stacks so that you can reference them later on (ex: rollback). If you'd like, you can use the parameterized CloudFormation templates we provided. + - Programmatically create a new EC2 Instance for your back-end. + - Make sure the EC2 instance has your back-end port opened up to public traffic (default port 3030). + - Programmatically save the new back-end url to memory or disk for later use (the front-end needs it). This could be done with [MemStash.io](https://memstash.io). + - Tag the back-end infrastructure so that it can be referenced later. 
+ - Create the front-end infrastructure by editing the step named `Ensure front-end infrastructure exist`. Again, add your information and remove the `#` to uncomment the appropriate lines. + - Use a CloudFormation template to create a new S3 Bucket for your front-end. + - Use the workflow id to mark the front-end infrastructure so that you can reference it easily later on. + - Tag the front-end infrastructure so that it can be referenced later. + - Generate an inventory file for use with Ansible by using AWS CLI to append the newly created backend IP to the [provided](https://github.com/udacity/cdond-c3-projectstarter/blob/master/.circleci/ansible/inventory.txt) inventory file. + - Persist the modified inventory file to the workspace so that we can use that file in future jobs. + +##### Configure Infrastructure + +- Find the job named `configure-infrastructure` in the config file. + - Write code to set up the EC2 instance to run as our back-end. + - Select a Docker image that supports Ansible. + - Add the SSH key fingerprint to the job so that Ansible will have access to the EC2 instance via SSH. + - Attach the "workspace" to the job so that you have access to all the files you need (e.g. inventory file). + - Create an Ansible playbook named `configure-server.yml` in the `.circleci/ansible` folder to set up the backend server. Remember that you are running this Playbook against an EC2 instance that has been programmatically created (inside the CircleCI job). + - Use username `ubuntu`. + - Keep your playbook clean and maintainable by using roles. You will need to decide what roles to create and how to split up your code. + - Install Python, if needed. + - Update/upgrade packages. + - Install nodejs. + - Install pm2. + - Configure environment variables (use the `environment` module type in your role): + - `ENVIRONMENT`=`production` + - `TYPEORM_CONNECTION`=`postgres` + - `TYPEORM_ENTITIES`=`./modules/domain/**/*.entity{.ts,.js}` + - `TYPEORM_HOST`={your postgres database hostname in RDS} + - `TYPEORM_PORT`=`5532` (or the port from RDS if it’s different) + - `TYPEORM_USERNAME`={your postgres database username in RDS} + - `TYPEORM_PASSWORD`={your postgres database password in RDS} + - `TYPEORM_DATABASE`={your postgres database name in RDS} + - Install and [Configure PM2](https://www.digitalocean.com/community/tutorials/how-to-use-pm2-to-setup-a-node-js-production-environment-on-an-ubuntu-vps) to run the back-end server. + +- Provide a URL to your public GitHub repository. **[URL01]** + +#### 2. Deploy Phase + +Now that the infrastructure is up and running, it’s time to configure dependencies and move our application files over. UdaPeople used to have this ops guy in the other building to make the copy every Friday, but now they want to make a full deploy on every single commit. Luckily for UdaPeople, you’re about to add a job that handles this automatically using Ansible. The ops guy will finally have enough time to catch up on his Netflix playlist. + +##### Database migrations + +- Find the job named `run-migrations` in the config file. + - Select a Docker image that's compatible with NodeJS. + - Write code that runs database migrations so that new changes are applied. + - Save some evidence that any new migrations ran. This is useful information if you need to roll back. Hint: The migration output will include `"has been executed successfully"` if any new migrations were applied. + - Save the output to a file or variable. + - Use `grep` to check for text that shows that a new migration was applied.
+ - If true, send a "1" (or any value at all) to [MemStash.io](https://memstash.io) using a key that is bound to the workflow id like `migration_${CIRCLE_WORKFLOW_ID}`. + +##### Deploy Front-end + +- Find the job named `deploy-frontend` in the config file. + - Select a Docker image that can handle the AWS CLI. + - Write code to prepare the front-end code for distribution and deploy it. + - Install any additional dependencies. + - Add the URL of the newly created back-end server to the `API_URL` environment variable. It is important to do this before building the front-end in the next step because the build process will take the `API_URL` from the environment and "bake it" (hard-code it) into the front-end code. + - In a previous job, you created the back-end infrastructure and saved the IP address of the new EC2 instance. This is the IP address you will want to pull out and use here. If the IP address is "1.2.3.4", then the `API_URL` should be `http://1.2.3.4:3030`. + - Run `npm run build` one last time so that the back-end URL gets "baked" into the front-end. + - Copy the files to your new S3 Bucket using AWS CLI (compiled front-end files can be found in a folder called `./dist`). +- Provide the public URL for your S3 Bucket (aka, your front-end). **[URL02]** + +##### Deploy Back-end + +- Find the job named `deploy-backend` in the config file. + - Select a Docker image that is compatible with Ansible. + - Create code to deploy the compiled backend files to the EC2 instance. + - Add the SSH key fingerprint to the job. + - Attach the "workspace" so that you have access to the previously generated `inventory.txt`. + - Install any necessary dependencies. + - Use Ansible to copy the files (compiled back-end files can be found in a folder called `./dist`). + +#### 3. Smoke Test Phase + +All this automated deployment stuff is great, but what if there’s something we didn’t plan for that made it through to production? What if the UdaPeople website is now down due to a runtime bug that our unit tests didn’t catch? Users won’t be able to access their data! This same situation can happen with manual deployments, too. In a manual deployment situation, what’s the first thing you do after you finish deploying? You do a “smoke test” by going to the site and making sure you can still log in or navigate around. You might do a quick `curl` on the backend to make sure it is responding. In an automated scenario, you can do the same thing through code. Let’s add a job to provide the UdaPeople team with a little sanity check. + +- Find the job named `smoke-test` in your config file. + - Select a lightweight Docker image like one of the Alpine images. + - Write code to make a simple test on both front-end and back-end. Use the suggested tests below or come up with your own. + - Install dependencies like `curl`. + - Test the back-end + - Retrieve the back-end IP address that you saved in an earlier job. + - Use `curl` to hit the back-end API's status endpoint (e.g. http://1.2.3.4:3030/api/status) + - No errors mean a successful test + - Test the front-end + - Form the front-end URL using the workflow id and your AWS region like this: `URL="http://udapeople-${CIRCLE_WORKFLOW_ID}.s3-website-us-east-1.amazonaws.com"` + - Check the front-end to make sure it includes a word or two that proves it is working properly.
+ - No errors mean a successful test + ```bash + if curl -s ${URL} | grep "Welcome" + then + return 1 + else + return 0 + fi + ``` + +- Provide a screenshot of an appropriate failure for the smoke test job. **[SCREENSHOT06]** + +![Job properly failing because of a failed smoke test.](screenshots/SCREENSHOT06.png) + +#### 4. Rollback Phase + +Of course, we all hope every pipeline follows the “happy path.” But any experienced UdaPeople developer knows that it’s not always the case. If the smoke test fails, what should we do? The smart thing would be to hit CTRL-Z and undo all our changes. But is it really that easy? It will be once you build the next job! + +- At the top of your config file, create a “[command](https://circleci.com/docs/2.0/reusing-config/#authoring-reusable-commands)” named `destroy-environment` to remove infrastructure if something goes wrong + - Trigger rollback jobs if the smoke tests or any following jobs fail. + - Delete files uploaded to S3. + - Destroy the current CloudFormation stacks using the same stack names you used when creating the stack earlier (front-end and back-end). +- At the top of your config file, create a “[command](https://circleci.com/docs/2.0/reusing-config/#authoring-reusable-commands)” named `revert-migrations` to roll back any migrations that were successfully applied during this CI/CD workflow + - Trigger rollback jobs if the smoke tests or any following jobs fail. + - Revert the last migration (IF a new migration was applied) on the database so that it goes back to the way it was before. You can use that value you saved in [MemStash.io](https://memstash.io) to know if you should revert any migrations. +- No more jobs should run after these commands have executed. +- Provide a screenshot of a successful rollback after a failed smoke test. **[SCREENSHOT07]** + +![Successful rollback job.](screenshots/SCREENSHOT07.png) + +- Add these rollback commands to other jobs that might fail and need a rollback. + +#### 5. Promotion Phase + +Assuming the smoke test came back clean, we should have a relatively high level of confidence that our deployment was a 99% success. Now it’s time for the last 1%. UdaPeople uses the “Blue-Green Deployment Strategy” which means we deployed a second environment or stack next to our existing production stack. Now that we’re sure everything is "A-okay", we can switch from blue to green. + +- Find the job named `cloudfront-update` in your config file. + - Select a Docker image that is compatible with AWS CLI. + - Create code that promotes our new front-end to production. + - Install any needed dependencies. + - Use a [CloudFormation template](https://github.com/udacity/cdond-c3-projectstarter/tree/master/.circleci/files) to change the origin of your CloudFront distribution to the new S3 bucket. +- Provide a screenshot of the successful job. **[SCREENSHOT08]** + +![Successful promotion job.](screenshots/SCREENSHOT08.png) + +- Provide the public URL for your CloudFront distribution (aka, your production front-end). **[URL03]** +- Provide the public URL for your back-end server in EC2. **[URL04]** + +#### 6. Cleanup Phase + +The UdaPeople finance department likes it when your AWS bills are more or less the same as last month OR trending downward. But, what if all this “Blue-Green” is leaving behind a trail of dead-end production environments? That upward trend probably means no Christmas bonus for the dev team. Let’s make sure everyone at UdaPeople has a Merry Christmas by adding a job to clean up old stacks.
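For orientation, here is a minimal sketch of what the cleanup job's shell logic could look like; the steps listed below break this down piece by piece. It assumes the `udapeople-<workflow id>` bucket name and `udapeople-frontend-`/`udapeople-backend-<workflow id>` stack names used in the examples in these instructions, so adapt the names and the condition to your own setup:

```bash
# Look up the workflow ID of the previous (old) deployment from the CloudFormation export.
export OldWorkflowID=$(aws cloudformation \
  list-exports --query "Exports[?Name==\`WorkflowID\`].Value" \
  --no-paginate --output text)

# Only clean up when an old deployment exists and it is not the one we just promoted.
if [ -n "${OldWorkflowID}" ] && [ "${OldWorkflowID}" != "${CIRCLE_WORKFLOW_ID}" ]
then
  # Empty the old front-end bucket first, then remove both old stacks.
  aws s3 rm "s3://udapeople-${OldWorkflowID}" --recursive
  aws cloudformation delete-stack --stack-name "udapeople-backend-${OldWorkflowID}"
  aws cloudformation delete-stack --stack-name "udapeople-frontend-${OldWorkflowID}"
fi
```

Emptying the bucket before deleting the front-end stack matters because CloudFormation cannot delete a stack whose S3 bucket still contains objects.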
+ +- Find the job named `cleanup` in your config file. + - Write code that deletes the previous S3 bucket and EC2 instance. + - Query CloudFormation to find out the old stack's workflow id like this: + ``` + export OldWorkflowID=$(aws cloudformation \ + list-exports --query "Exports[?Name==\`WorkflowID\`].Value" \ + --no-paginate --output text) + export STACKS=($(aws cloudformation list-stacks --query "StackSummaries[*].StackName" \ + --stack-status-filter CREATE_COMPLETE --no-paginate --output text)) + ``` + - Remove old stacks/files + - Back-end stack (example: `aws cloudformation delete-stack --stack-name "udapeople-backend-${OldWorkflowID}"`) + - Front-end files in S3 (example: `aws s3 rm "s3://udapeople-${OldWorkflowID}" --recursive`) + - Front-end stack +- Provide a screenshot of the successful job. **[SCREENSHOT09]** + +![Successful cleanup job.](screenshots/SCREENSHOT09.png) + +#### Other Considerations + +- Make sure you only run deployment-related jobs on commits to the `master` branch. Provide screenshot of a build triggered by a non-master commit. It should only run the jobs prior to deployment. **[SCREENSHOT10]** + +![Deploy jobs only run on master](screenshots/SCREENSHOT10.png) diff --git a/instructions/4-turn-errors-into-sirens.md b/instructions/4-turn-errors-into-sirens.md new file mode 100644 index 0000000..5986574 --- /dev/null +++ b/instructions/4-turn-errors-into-sirens.md @@ -0,0 +1,40 @@ +## Section 4 - Surface Critical Server Errors for Diagnosis Using Centralized Logging + +Errors and unhealthy states are important to know about, wouldn’t you say? But, too often, server errors are silenced by hasty reboots or simply never having an outlet in the first place. If a server has an error in a forest, but no one is there to hear it, did it actually happen? Why is the server in the forest in the first place? + +UdaPeople chose Prometheus as a monitoring solution since it is open-source and versatile. Once configured properly, Prometheus will turn our server’s errors into sirens that no one can ignore. + +### Setup + +Please watch the [video walkthrough of how to set up your EC2 instance and Prometheus here](https://www.youtube.com/watch?v=PSXrbE54FqQ). + +- Manually create an EC2 instance and SSH into it. +- Set up Prometheus Server on EC2 following [these instructions](https://codewizardly.com/prometheus-on-aws-ec2-part1/). +- Configure Prometheus for AWS Service Discovery following [these instructions](https://codewizardly.com/prometheus-on-aws-ec2-part3/). + +### To Do + +#### 1. Setup Back-End Monitoring + +In order for server instances to speak to Prometheus, we need to install an “exporter” in each one. Create a job that uses Ansible to go into the EC2 instance and install the exporter. + +- Add a section to your back-end configuration job to install the `node_exporter` for Prometheus monitoring. This should be done using Ansible. Your playbook can simulate the steps in [this tutorial](https://codewizardly.com/prometheus-on-aws-ec2-part2/). +- After deploy, ensure your back-end is being discovered by the Prometheus Server. +- Provide a screenshot of a graph of your EC2 instance including available memory, available disk space, and CPU usage. **[SCREENSHOT11]** + +![Graphs of CPU, Disk and Memory utilization on systems being monitored.](screenshots/SCREENSHOT11.png) + +- Provide a public URL to your Prometheus Server. **[URL05]** + +#### 2. Setup Alerts + +Now that Prometheus and our EC2 instance have an open line of communication, we need to set up some alerts. 
The UdaPeople dev team loves their chat tool and wants to receive an alert in chat when the server starts running out of memory or disk space. Set up a job to make that dream a reality. + +- SSH into your Prometheus Server. +- Install and configure AlertManager by following [these instructions](https://codewizardly.com/prometheus-on-aws-ec2-part4/). +- You can decide if you will use Slack, email, or another messaging service. Our examples are using Slack, but you should feel free to use the messaging service to which you are most accustomed. +- Set up an alert for low memory or some condition you can control to intentionally cause an alert. +- Provide a screenshot of an alert that was sent by Prometheus. **[SCREENSHOT12]** + +![Alerts from a failing system that is being monitored.](screenshots/SCREENSHOT12.png) + diff --git a/instructions/screenshots/SCREENSHOT01.png b/instructions/screenshots/SCREENSHOT01.png new file mode 100644 index 0000000..00a4ff0 Binary files /dev/null and b/instructions/screenshots/SCREENSHOT01.png differ diff --git a/instructions/screenshots/SCREENSHOT02.png b/instructions/screenshots/SCREENSHOT02.png new file mode 100644 index 0000000..ac66900 Binary files /dev/null and b/instructions/screenshots/SCREENSHOT02.png differ diff --git a/instructions/screenshots/SCREENSHOT03.png b/instructions/screenshots/SCREENSHOT03.png new file mode 100644 index 0000000..29f9fca Binary files /dev/null and b/instructions/screenshots/SCREENSHOT03.png differ diff --git a/instructions/screenshots/SCREENSHOT04.png b/instructions/screenshots/SCREENSHOT04.png new file mode 100644 index 0000000..9895369 Binary files /dev/null and b/instructions/screenshots/SCREENSHOT04.png differ diff --git a/instructions/screenshots/SCREENSHOT05.png b/instructions/screenshots/SCREENSHOT05.png new file mode 100644 index 0000000..d15d930 Binary files /dev/null and b/instructions/screenshots/SCREENSHOT05.png differ diff --git a/instructions/screenshots/SCREENSHOT06.png b/instructions/screenshots/SCREENSHOT06.png new file mode 100644 index 0000000..8301ded Binary files /dev/null and b/instructions/screenshots/SCREENSHOT06.png differ diff --git a/instructions/screenshots/SCREENSHOT07.png b/instructions/screenshots/SCREENSHOT07.png new file mode 100644 index 0000000..b30955c Binary files /dev/null and b/instructions/screenshots/SCREENSHOT07.png differ diff --git a/instructions/screenshots/SCREENSHOT08.png b/instructions/screenshots/SCREENSHOT08.png new file mode 100644 index 0000000..62773ee Binary files /dev/null and b/instructions/screenshots/SCREENSHOT08.png differ diff --git a/instructions/screenshots/SCREENSHOT09.png b/instructions/screenshots/SCREENSHOT09.png new file mode 100644 index 0000000..62773ee Binary files /dev/null and b/instructions/screenshots/SCREENSHOT09.png differ diff --git a/instructions/screenshots/SCREENSHOT10.png b/instructions/screenshots/SCREENSHOT10.png new file mode 100644 index 0000000..13c1e07 Binary files /dev/null and b/instructions/screenshots/SCREENSHOT10.png differ diff --git a/instructions/screenshots/SCREENSHOT11.png b/instructions/screenshots/SCREENSHOT11.png new file mode 100644 index 0000000..9a4130e Binary files /dev/null and b/instructions/screenshots/SCREENSHOT11.png differ diff --git a/instructions/screenshots/SCREENSHOT12.png b/instructions/screenshots/SCREENSHOT12.png new file mode 100644 index 0000000..a1cdb9b Binary files /dev/null and b/instructions/screenshots/SCREENSHOT12.png differ diff --git a/instructions/screenshots/SCREENSHOT__service-discovery.png 
b/instructions/screenshots/SCREENSHOT__service-discovery.png new file mode 100644 index 0000000..62018eb Binary files /dev/null and b/instructions/screenshots/SCREENSHOT__service-discovery.png differ diff --git a/instructions/screenshots/readme.md b/instructions/screenshots/readme.md new file mode 100644 index 0000000..99b50f1 --- /dev/null +++ b/instructions/screenshots/readme.md @@ -0,0 +1 @@ +# Project Solution Screenshots diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..48e341a --- /dev/null +++ b/package-lock.json @@ -0,0 +1,3 @@ +{ + "lockfileVersion": 1 +} diff --git a/presentation.pdf b/presentation.pdf deleted file mode 100644 index 4372f69..0000000 Binary files a/presentation.pdf and /dev/null differ diff --git a/project3.zip b/project3.zip deleted file mode 100644 index b701df5..0000000 Binary files a/project3.zip and /dev/null differ diff --git a/screenshots/SCREENSHOT01.png b/screenshots/SCREENSHOT01.png deleted file mode 100644 index 9a501dc..0000000 Binary files a/screenshots/SCREENSHOT01.png and /dev/null differ diff --git a/screenshots/SCREENSHOT02.png b/screenshots/SCREENSHOT02.png deleted file mode 100644 index 5fcb763..0000000 Binary files a/screenshots/SCREENSHOT02.png and /dev/null differ diff --git a/screenshots/SCREENSHOT03.png b/screenshots/SCREENSHOT03.png deleted file mode 100644 index 76368c1..0000000 Binary files a/screenshots/SCREENSHOT03.png and /dev/null differ diff --git a/screenshots/SCREENSHOT04.png b/screenshots/SCREENSHOT04.png deleted file mode 100644 index f891f4b..0000000 Binary files a/screenshots/SCREENSHOT04.png and /dev/null differ diff --git a/screenshots/SCREENSHOT05.png b/screenshots/SCREENSHOT05.png deleted file mode 100644 index f8b55b2..0000000 Binary files a/screenshots/SCREENSHOT05.png and /dev/null differ diff --git a/screenshots/SCREENSHOT06.png b/screenshots/SCREENSHOT06.png deleted file mode 100644 index 1abac4a..0000000 Binary files a/screenshots/SCREENSHOT06.png and /dev/null differ diff --git a/screenshots/SCREENSHOT07.png b/screenshots/SCREENSHOT07.png deleted file mode 100644 index cd0b553..0000000 Binary files a/screenshots/SCREENSHOT07.png and /dev/null differ diff --git a/screenshots/SCREENSHOT08.png b/screenshots/SCREENSHOT08.png deleted file mode 100644 index 1136b08..0000000 Binary files a/screenshots/SCREENSHOT08.png and /dev/null differ diff --git a/screenshots/SCREENSHOT09.png b/screenshots/SCREENSHOT09.png deleted file mode 100644 index 514251e..0000000 Binary files a/screenshots/SCREENSHOT09.png and /dev/null differ diff --git a/screenshots/SCREENSHOT10.png b/screenshots/SCREENSHOT10.png deleted file mode 100644 index 4b01f88..0000000 Binary files a/screenshots/SCREENSHOT10.png and /dev/null differ diff --git a/screenshots/SCREENSHOT11 cpu.png b/screenshots/SCREENSHOT11 cpu.png deleted file mode 100644 index de2c6ba..0000000 Binary files a/screenshots/SCREENSHOT11 cpu.png and /dev/null differ diff --git a/screenshots/SCREENSHOT11 disk.png b/screenshots/SCREENSHOT11 disk.png deleted file mode 100644 index 08a6379..0000000 Binary files a/screenshots/SCREENSHOT11 disk.png and /dev/null differ diff --git a/screenshots/SCREENSHOT11 memory.png b/screenshots/SCREENSHOT11 memory.png deleted file mode 100644 index 2173b32..0000000 Binary files a/screenshots/SCREENSHOT11 memory.png and /dev/null differ diff --git a/screenshots/SCREENSHOT12.png b/screenshots/SCREENSHOT12.png deleted file mode 100644 index 0cac247..0000000 Binary files a/screenshots/SCREENSHOT12.png and /dev/null differ diff 
--git a/screenshots/URL02.png b/screenshots/URL02.png deleted file mode 100644 index 44fe85d..0000000 Binary files a/screenshots/URL02.png and /dev/null differ diff --git a/screenshots/URL03_SCREENSHOT.png b/screenshots/URL03_SCREENSHOT.png deleted file mode 100644 index bd4aa67..0000000 Binary files a/screenshots/URL03_SCREENSHOT.png and /dev/null differ diff --git a/screenshots/URL04_SCREENSHOT.png b/screenshots/URL04_SCREENSHOT.png deleted file mode 100644 index 5283b7b..0000000 Binary files a/screenshots/URL04_SCREENSHOT.png and /dev/null differ diff --git a/screenshots/URL05_SCREENSHOT.png b/screenshots/URL05_SCREENSHOT.png deleted file mode 100644 index b985795..0000000 Binary files a/screenshots/URL05_SCREENSHOT.png and /dev/null differ diff --git a/udapeople-pipeline.png b/udapeople-pipeline.png new file mode 100644 index 0000000..759ae6d Binary files /dev/null and b/udapeople-pipeline.png differ diff --git a/udapeople.png b/udapeople.png new file mode 100644 index 0000000..fa9fd65 Binary files /dev/null and b/udapeople.png differ diff --git a/url.txt b/url.txt deleted file mode 100644 index e22e6eb..0000000 --- a/url.txt +++ /dev/null @@ -1,2 +0,0 @@ -URL01 ==== https://github.com/abiola814/udapeople -